| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match of the target is found."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
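# A quick illustration (not part of the original file): the fitness function
# simply counts position-wise matches, so a perfect candidate scores len(target).
#
#     >>> evaluate("Hello", "Hello")
#     ('Hello', 5.0)
#     >>> evaluate("Helxo", "Hello")
#     ('Helxo', 4.0)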
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
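# Sketch (not part of the original file): because of the _LazyModule indirection
# above, a statement like
#
#     from transformers import JukeboxConfig
#
# stays cheap; torch-backed symbols such as JukeboxModel are only imported the
# first time they are actually accessed.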
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
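# Sketch (not in the original file): a test can request the fixture to get a
# ready-to-use dummy loading script on disk, e.g.:
#
#     def test_dummy_dataset_script(dataset_loading_script_dir, dataset_loading_script_name):
#         assert dataset_loading_script_dir.endswith(dataset_loading_script_name)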
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) the image processor will produce for the given inputs."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
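# Hypothetical usage sketch (not part of the original file; the checkpoint name
# is an example, not prescribed by this module):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])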
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    # Original implementation of the gelu activation function (erf-based).
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    # Smoother tanh-based GELU approximation.
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    # GELU clipped to the range [-10, 10].
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    # Gated Linear Unit: split the input in two halves along `axis` and gate one with the other.
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
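# Sketch (not in the original file): get_tf_activation resolves the activation
# string used in a model config into the corresponding callable, e.g.:
#
#     act = get_tf_activation("gelu_new")
#     y = act(tf.constant([-1.0, 0.0, 1.0]))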
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
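# Hypothetical invocation sketch (not part of the original file; all paths are
# placeholders):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path /path/to/output_dir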
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler in the style of Karras et al. (2022) for variance-expanding models."""

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
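# Sketch (not part of the original file; argument names follow the methods above
# and should be treated as an assumption): one denoising iteration pairs
# add_noise_to_input with step:
#
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#     model_output = model(sample_hat, sigma_hat)
#     sample, derivative, state = scheduler.step(
#         state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False
#     )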
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]

        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)

        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return a sorted list of the most frequently occurring value(s) in input_list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
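# Quick check (not in the original file): ties are all reported, in sorted order.
assert mode([2, 2, 3, 3, 4]) == [2, 3]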
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of the number 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
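# Quick cross-check (not in the original file): 2**15 = 32768 and 3+2+7+6+8 = 26.
assert solution(15) == sum(int(d) for d in str(2**15)) == 26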
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding

        elif text is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 213 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class A_ :
_lowerCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowerCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowerCamelCase : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowerCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowerCamelCase : bool = field(default=lowerCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowerCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class A_ :
_lowerCamelCase : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowerCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowerCamelCase : int = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowerCamelCase : bool = field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
_UpperCAmelCase = import_module("tasks" )
try:
_UpperCAmelCase = getattr(__lowercase , model_args.task_type )
_UpperCAmelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , __lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
labels = token_classification_task.get_labels(data_args.labels )
label_map = dict(enumerate(labels ) )
num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
model = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
train_dataset = (
TokenClassificationDataset(
token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
eval_dataset = (
TokenClassificationDataset(
token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
preds = np.argmax(predictions , axis=2 )
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size )]
preds_list = [[] for _ in range(batch_size )]
for i in range(batch_size ):
for j in range(seq_len ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(p: EvalPrediction ) -> Dict:
preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(out_label_list , preds_list ),
"precision": precision_score(out_label_list , preds_list ),
"recall": recall_score(out_label_list , preds_list ),
"f1": f1_score(out_label_list , preds_list ),
}
# Data collator
data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(output_eval_file , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info("  %s = %s" , key , value )
writer.write("%s = %s\n" % (key, value) )
results.update(result )
# Predict
if training_args.do_predict:
test_dataset = TokenClassificationDataset(
token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
predictions, label_ids, metrics = trainer.predict(test_dataset )
preds_list, _ = align_predictions(predictions , label_ids )
output_test_results_file = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(output_test_results_file , "w" ) as writer:
for key, value in metrics.items():
logger.info("  %s = %s" , key , value )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(output_test_predictions_file , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 236 |
'''simple docstring'''
from manim import *
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Dict ):
_UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase = Rectangle(height=0.2_5 , width=0.2_5 )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = Text("CPU" , font_size=2_4 )
_UpperCAmelCase = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
_UpperCAmelCase = [mem.copy() for i in range(4 )]
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = Text("GPU" , font_size=2_4 )
_UpperCAmelCase = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = Text("Model" , font_size=2_4 )
_UpperCAmelCase = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(snake_case_ ):
_UpperCAmelCase = fill.copy().set_fill(snake_case_ , opacity=0.8 )
target.move_to(snake_case_ )
model_arr.append(snake_case_ )
_UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ )
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = Text("Disk" , font_size=2_4 )
_UpperCAmelCase = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
disk.move_to([-4, -1.2_5, 0] )
self.add(snake_case_ , snake_case_ )
_UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ , snake_case_ )
_UpperCAmelCase = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , )
blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case_ )
_UpperCAmelCase = MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ ) )
_UpperCAmelCase = Square(0.3 )
input.set_fill(snake_case_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , snake_case_ , buff=0.5 )
self.play(Write(snake_case_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=snake_case_ , buff=0.0_2 )
self.play(MoveToTarget(snake_case_ ) )
self.play(FadeOut(snake_case_ ) )
_UpperCAmelCase = Arrow(start=snake_case_ , end=snake_case_ , color=snake_case_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , snake_case_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_UpperCAmelCase = MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) )
_UpperCAmelCase = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(snake_case_ ) , Circumscribe(model_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_cpu_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_UpperCAmelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , snake_case_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
_UpperCAmelCase = AnimationGroup(
FadeOut(snake_case_ , run_time=0.5 ) , MoveToTarget(snake_case_ , run_time=0.5 ) , FadeIn(snake_case_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(snake_case_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_UpperCAmelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_arr[i + 1] , color=snake_case_ , **snake_case_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(cpu_left_col_base[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_UpperCAmelCase = a_c
_UpperCAmelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(snake_case_ ) , FadeOut(snake_case_ , run_time=0.5 ) , )
_UpperCAmelCase = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) , MoveToTarget(snake_case_ ) )
self.wait()
| 236 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
__UpperCAmelCase : int = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCamelCase ( PreTrainedTokenizer ):
def __init__( self : List[Any] , __snake_case : Optional[int] , __snake_case : str=False , __snake_case : Any=True , __snake_case : List[str]=False , __snake_case : Union[str, Any]="<s>" , __snake_case : Optional[Any]="</s>" , __snake_case : List[str]="<unk>" , __snake_case : Optional[int]="<sep>" , __snake_case : Dict="<pad>" , __snake_case : Optional[int]="<cls>" , __snake_case : Optional[int]="<mask>" , __snake_case : Dict=["<eop>", "<eod>"] , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Union[str, Any] , ) -> None:
_a : List[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
_a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_a : int = 3
_a : Optional[int] = do_lower_case
_a : List[str] = remove_space
_a : Tuple = keep_accents
_a : int = vocab_file
_a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
_a : Dict = jieba
_a : Union[str, Any] = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
return len(self.sp_model )
def snake_case_ ( self : Any ) -> Dict:
_a : Union[str, Any] = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Tuple:
_a : Tuple = self.__dict__.copy()
_a : List[str] = None
return state
def __setstate__( self : Dict , __snake_case : Union[str, Any] ) -> Optional[Any]:
_a : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : Union[str, Any] = {}
_a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self : List[Any] , __snake_case : Union[str, Any] ) -> Tuple:
if self.remove_space:
_a : Optional[int] = ''' '''.join(inputs.strip().split() )
else:
_a : str = inputs
_a : Union[str, Any] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
_a : Any = unicodedata.normalize('''NFKD''' , __snake_case )
_a : Union[str, Any] = ''''''.join([c for c in outputs if not unicodedata.combining(__snake_case )] )
if self.do_lower_case:
_a : Dict = outputs.lower()
return outputs
def snake_case_ ( self : int , __snake_case : str ) -> List[str]:
_a : Optional[int] = self.preprocess_text(__snake_case )
_a : List[str] = self.sp_model.encode(__snake_case , out_type=__snake_case )
_a : Optional[int] = []
for piece in pieces:
if len(__snake_case ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_a : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__snake_case , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a : List[str] = cur_pieces[1:]
else:
_a : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__snake_case )
else:
new_pieces.append(__snake_case )
return new_pieces
def snake_case_ ( self : Tuple , __snake_case : Union[str, Any] ) -> Dict:
return self.sp_model.PieceToId(__snake_case )
def snake_case_ ( self : List[str] , __snake_case : Tuple ) -> List[str]:
return self.sp_model.IdToPiece(__snake_case )
def snake_case_ ( self : Optional[int] , __snake_case : Optional[int] ) -> int:
_a : int = ''''''.join(__snake_case ).replace(__snake_case , ''' ''' ).strip()
return out_string
def snake_case_ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
_a : Dict = [self.sep_token_id]
_a : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case_ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1, 1]
return ([0] * len(__snake_case )) + [1, 1]
def snake_case_ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
_a : Optional[int] = [self.sep_token_id]
_a : Dict = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case_ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Dict = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , '''wb''' ) as fi:
_a : List[str] = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
def _decode( self , *args , **kwargs ):
text = super()._decode(*args , **kwargs )
text = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
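# Note: the translator built in __init__ maps " " and "\n" to the placeholder
# characters \u2582 and \u2583 before SentencePiece sees the text, and _decode
# above reverses that mapping after decoding.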
| 712 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8 ):
chars = ascii_letters + digits + punctuation
return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl , i ):
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(chars_incl )
quotient = i // 3
remainder = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
chars = (
chars_incl
+ random(ascii_letters , quotient + remainder )
+ random(digits , quotient )
+ random(punctuation , quotient )
)
list_of_chars = list(chars )
shuffle(list_of_chars )
return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random(chars_incl , i ):
return "".join(secrets.choice(chars_incl ) for _ in range(i ) )
def random_number(chars_incl , i ):
pass # Put your code here...
def random_letters(chars_incl , i ):
pass # Put your code here...
def random_characters(chars_incl , i ):
pass # Put your code here...
def is_strong_password(password , min_length = 8 ):
if len(password ) < min_length:
# Your Password must be at least 8 characters long
return False
upper = any(char in ascii_uppercase for char in password )
lower = any(char in ascii_lowercase for char in password )
num = any(char in digits for char in password )
spec_char = any(char in punctuation for char in password )
return upper and lower and num and spec_char
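# Example (illustration): is_strong_password("Aa1!aaaa") -> True, since it has
# an uppercase letter, a lowercase letter, a digit and punctuation at length 8.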
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main():
max_length = int(input('''Please indicate the max length of your password: ''' ).strip() )
chars_incl = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''' , password_generator(max_length ) )
print(
'''Alternative Password generated:''' , alternative_password_generator(chars_incl , max_length ) , )
print('''[If you are thinking of using this password, you had better save it.]''' )
print('''[If you are thinking of using this passsword, You better save it.]''' )
if __name__ == "__main__":
main()
| 249 | 0 |
def encrypt(input_string: str , key: int ) -> str:
temp_grid: list[list[str]] = [[] for _ in range(key )]
lowest = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1 or len(input_string ) <= key:
return input_string
for position, character in enumerate(input_string ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(character )
grid = ["".join(row ) for row in temp_grid]
output_string = "".join(grid )
return output_string
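# Worked example (illustration): encrypt("HELLOWORLD", 3)
# rail 0: H . . . O . . . L .  -> "HOL"
# rail 1: . E . L . W . R . D  -> "ELWRD"
# rail 2: . . L . . . O . . .  -> "LO"
# concatenated output: "HOLELWRDLO"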
def decrypt(input_string: str , key: int ) -> str:
grid = []
lowest = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1:
return input_string
temp_grid: list[list[str]] = [[] for _ in range(key )] # generates template
for position in range(len(input_string ) ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('*' )
counter = 0
for row in temp_grid: # fills in the characters
splice = input_string[counter : counter + len(row )]
grid.append(list(splice ) )
counter += len(splice )
output_string = "" # reads as zigzag
for position in range(len(input_string ) ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
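# Worked example (illustration): decrypt("HOLELWRDLO", 3) rebuilds the zigzag
# template, slices the ciphertext into rails of lengths 3, 5 and 2, and reads
# the zigzag back to recover "HELLOWORLD".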
def bruteforce(input_string: str ) -> dict[int, str]:
results = {}
for key_guess in range(1 , len(input_string ) ): # tries every key
results[key_guess] = decrypt(input_string , key_guess )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 |
def solution(numerator: int = 1 , digit: int = 1_0_0_0 ) -> int:
'''simple docstring'''
the_digit = 1
longest_list_length = 0
for divide_by_number in range(numerator , digit + 1 ):
has_been_divided: list[int] = []
now_divide = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(has_been_divided ):
longest_list_length = len(has_been_divided )
the_digit = divide_by_number
else:
has_been_divided.append(now_divide )
now_divide = now_divide * 1_0 % divide_by_number
return the_digit
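# This is the Project Euler 26 search: it returns the divide_by_number below
# `digit` whose unit fraction 1/divide_by_number has the longest recurring
# decimal cycle. With the defaults, solution() returns 983.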
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257 | 0 |
def perfect_cube(n: int ) -> bool:
"""simple docstring"""
# Round the real cube root before comparing; comparing the raw float directly
# can fail (e.g. 27 ** (1 / 3) is not exactly 3.0).
val = round(n ** (1 / 3) )
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 715 |
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase : str = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase : Dict = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** ("warn", 0 or 1): Sets the value to return when there is a zero division. Defaults to 'warn'.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase : List[Any] = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
"""simple docstring"""
score = recall_score(
references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
return {"recall": float(score ) if score.size == 1 else score}
| 146 | 0 |
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list ,arrival_time: list ,burst_time: list ,no_of_process: int ) -> list:
'''simple docstring'''
current_time = 0
# Number of processes finished
finished_process_count = 0
# Marks whether each process has finished: 1 once it has run to completion,
# 0 while it is still waiting.
finished_process = [0] * no_of_process
# List to include calculation results
turn_around_time = [0] * no_of_process
# Sort by arrival time.
burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
process_name = [process_name[i] for i in np.argsort(arrival_time )]
arrival_time.sort()
while no_of_process > finished_process_count:
i = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
current_time = arrival_time[i]
response_ratio = 0
# Index showing the location of the process being performed
loc = 0
# Saves the current response ratio.
temp = 0
for i in range(0 ,no_of_process ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
if response_ratio < temp:
response_ratio = temp
loc = i
# Calculate the turn around time
turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
finished_process[loc] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
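# The selection rule above is Highest Response Ratio Next (HRRN): among the
# ready processes, run the one maximizing (waiting_time + burst_time) / burst_time.
# Illustration: a process that has waited 6 units with burst 3 has ratio
# (6 + 3) / 3 = 3.0 and is preferred over a fresh arrival with ratio 1.0.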
def calculate_waiting_time(process_name: list ,turn_around_time: list ,burst_time: list ,no_of_process: int ) -> list:
'''simple docstring'''
waiting_time = [0] * no_of_process
for i in range(0 ,no_of_process ):
waiting_time[i] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
no_of_process = 5
process_name = ["A", "B", "C", "D", "E"]
arrival_time = [1, 2, 3, 4, 5]
burst_time = [1, 2, 3, 4, 5]
turn_around_time = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
waiting_time = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
f"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(f"""average waiting time : {mean(waiting_time):.5f}""")
print(f"""average turn around time : {mean(turn_around_time):.5f}""") | 505 | from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor ( ProcessorMixin ):
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AutoImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__( self , image_processor , tokenizer ):
"""simple docstring"""
super().__init__(image_processor , tokenizer )
self.current_processor = self.image_processor
def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
if images is not None:
image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
if text is not None and images is not None:
encoding["pixel_values"] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def batch_decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
parser = HfArgumentParser(TensorFlowBenchmarkArguments )
benchmark_args = parser.parse_args_into_dataclasses()[0]
benchmark = TensorFlowBenchmark(args=benchmark_args )
try:
benchmark_args = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
full_error_msg = ""
depreciated_args = eval(str(e ).split(" " )[-1] )
wrong_args = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(arg )
if len(wrong_args ) > 0:
full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
| 329 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
"""simple docstring"""
feature_extractor_class = '''ClapFeatureExtractor'''
tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , feature_extractor , tokenizer ):
"""simple docstring"""
super().__init__(feature_extractor , tokenizer )
def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
"""simple docstring"""
sampling_rate = kwargs.pop("sampling_rate" , None )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
if audios is not None:
audio_features = self.feature_extractor(
audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
if text is not None and audios is not None:
encoding["input_features"] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
def batch_decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
"""simple docstring"""
tokenizer_input_names = self.tokenizer.model_input_names
feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 329 | 1 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0 ) -> float:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
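# Sanity check (illustration): gaussian(0) == 1 / sqrt(2 * pi) ~= 0.3989422804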
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
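# For reference, the `accelerator.accumulate` context manager used below is
# roughly equivalent to this manual pattern (sketch only; names illustrative):
#
# for step, batch in enumerate(train_dataloader):
#     loss = model(**batch).loss / gradient_accumulation_steps
#     loss.backward()
#     if (step + 1) % gradient_accumulation_steps == 0:
#         optimizer.step()
#         optimizer.zero_grad()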
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""", """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args ):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None ) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""", """mrpc""" )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""", type=int, default=1, help="""The number of minibatches to be ran before gradients are accumulated.""", )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args )
if __name__ == "__main__":
main()
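# Typical launch command (illustration; substitute your own script name):
# accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2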
| 151 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=DummyObject ):
snake_case_ = ["""torch""", """scipy"""]
def __init__( self : Any , *_lowercase : str , **_lowercase : Any ) -> Optional[int]:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def _lowerCamelCase ( cls : Dict , *_lowercase : str , **_lowercase : Dict ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def _lowerCamelCase ( cls : str , *_lowercase : Optional[int] , **_lowercase : Optional[int] ) -> Tuple:
requires_backends(cls , ["torch", "scipy"] ) | 715 | """simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__UpperCamelCase : List[str] = logging.getLogger(__name__)
__UpperCamelCase : List[Any] = "Hello world! cécé herlolip"
__UpperCamelCase : Any = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def __UpperCAmelCase ( _snake_case : Union[str, Any], _snake_case : str ):
_lowercase = BertAbsConfig(
temp_dir=".", finetune_bert=_snake_case, large=_snake_case, share_emb=_snake_case, use_bert_emb=_snake_case, encoder="bert", max_pos=5_1_2, enc_layers=6, enc_hidden_size=5_1_2, enc_heads=8, enc_ff_size=5_1_2, enc_dropout=0.2, dec_layers=6, dec_hidden_size=7_6_8, dec_heads=8, dec_ff_size=2_0_4_8, dec_dropout=0.2, )
_lowercase = torch.load(_snake_case, lambda _snake_case, _snake_case : storage )
_lowercase = AbsSummarizer(_snake_case, torch.device("cpu" ), _snake_case )
original.eval()
_lowercase = BertAbsSummarizer(_snake_case, torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
_lowercase = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
_lowercase = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_snake_case )) )
_lowercase = torch.tensor(_snake_case ).unsqueeze(0 )
_lowercase = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_snake_case )) )
_lowercase = torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_lowercase = encoder_input_ids
_lowercase = decoder_input_ids
_lowercase = _lowercase = None
_lowercase = None
_lowercase = _lowercase = None
_lowercase = _lowercase = None
_lowercase = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_lowercase = original(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case )[0]
_lowercase = original.generator(_snake_case )
_lowercase = new_model(
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case )[0]
_lowercase = new_model.generator(_snake_case )
_lowercase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_snake_case ) )
_lowercase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_snake_case ) )
_lowercase = torch.allclose(_snake_case, _snake_case, atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
) | 227 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
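# A quick illustration of the helper above (values chosen only for the example):
#   split_text("a b c d e", n=2)  ->  ["a b", "c d", "e"]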
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments") -> None:
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
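    # A retrieval sketch against the index built above (the question encoder and the
    # query text are illustrative; only ``get_nearest_examples`` on the indexed
    # column is assumed):
    #   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    #   q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
    #   q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
    #   question_embedding = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
    #   scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)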
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 109 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
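# Usage sketch via python-fire (the script name and the d_model override are illustrative):
#   python save_randomly_initialized.py t5-small /tmp/tiny-t5 --d_model 64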
| 272 | 0 |
'''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True if the bracket sequence ``s`` is balanced."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0
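# A few illustrative cases for the checker above:
#   is_balanced("([]{})")  ->  True
#   is_balanced("(]")      ->  False
#   is_balanced("((")      ->  False  (unclosed brackets leave the stack non-empty)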
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 271 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors
            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'''{safetensors_version}''',
'''Accelerate version''': F'''{accelerate_version}''',
'''Accelerate config''': F'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
'''Jax version''': F'''{jax_version}''',
'''JaxLib version''': F'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 271 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only"""
    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False)
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)
    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
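# A minimal usage sketch for the test-only adapter above (toy sizes are arbitrary):
#   base = nn.Linear(16, 16)
#   lora = LoRALayer(base, rank=4)
#   out = lora(torch.randn(2, 16))  # base output plus the low-rank adapter output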
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
a_ : Union[str, Any] = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'quantization_config' ) )
a_ : Optional[Any] = config.to_dict()
a_ : List[str] = config.to_diff_dict()
a_ : int = config.to_json_string()
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
a_ : Optional[Any] = self.model_fpaa.get_memory_footprint()
a_ : Tuple = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
a_ : Any = self.tokenizer(self.input_text , return_tensors='pt' )
a_ : List[Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
a_ : str = BitsAndBytesConfig()
a_ : Optional[int] = True
a_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE__ , device_map='auto' )
a_ : Any = self.tokenizer(self.input_text , return_tensors='pt' )
a_ : Union[str, Any] = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
with self.assertRaises(SCREAMING_SNAKE_CASE__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ : Optional[int] = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
a_ : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE__ , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a_ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' )
a_ : Optional[Any] = self.model_fpaa.to(torch.floataa )
a_ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
a_ : str = self.model_fpaa.to('cpu' )
# Check this does not throw an error
a_ : Any = self.model_fpaa.half()
# Check this does not throw an error
a_ : str = self.model_fpaa.float()
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
a_ : Dict = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any ) -> Any:
a_ : str = 't5-small'
a_ : Any = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
a_ : Dict = AutoTokenizer.from_pretrained(cls.model_name )
a_ : Optional[Any] = 'Translate in German: Hello, my dog is cute'
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
from transformers import TaForConditionalGeneration
a_ : Union[str, Any] = TaForConditionalGeneration._keep_in_fpaa_modules
a_ : Union[str, Any] = None
# test with `t5-small`
a_ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
a_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
a_ : Optional[int] = model.generate(**SCREAMING_SNAKE_CASE__ )
# test with `flan-t5-small`
a_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
a_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
a_ : Optional[int] = model.generate(**SCREAMING_SNAKE_CASE__ )
a_ : int = modules
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
a_ : str = model.generate(**SCREAMING_SNAKE_CASE__ )
# test with `flan-t5-small`
a_ : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
a_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
a_ : Tuple = model.generate(**SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
super().setUp()
# model_name
a_ : List[str] = 'bigscience/bloom-560m'
a_ : Any = 't5-small'
# Different types of model
a_ : Union[str, Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
# Sequence classification model
a_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
# CausalLM model
a_ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
# Seq2seq model
a_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map='auto' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
super().setUp()
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
super().setUp()
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=True, device_map="balanced"
        )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) , self.EXPECTED_OUTPUTS )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
        self.model_name = "facebook/opt-350m"
super().setUp()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True)
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE__ ) ):
a_ : str = LoRALayer(module.q_proj , rank=1_6 )
a_ : List[Any] = LoRALayer(module.k_proj , rank=1_6 )
a_ : Optional[int] = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
            out = model.forward(**batch)
out.logits.norm().backward()
for module in model.modules():
            if isinstance(module, LoRALayer):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module, nn.Embedding):
self.assertTrue(module.weight.grad is None )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 570 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 570 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player (maximizer or minimizer)."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
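# A tiny worked case (illustrative): with scores [3, 5, 2, 9] and height = 2, the
# maximizer gets max(min(3, 5), min(2, 9)) = max(3, 2) = 3, i.e.
# minimax(0, 0, True, [3, 5, 2, 9], 2) == 3.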
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 570 |
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return base ** exponent % modulo_value by recursive square-and-multiply."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
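# For example (illustrative values): _modexpt(3, 4, 100) == 81, and
# _modexpt(2, 10, 1000) == 24 since 2**10 = 1024.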
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last ``digits`` digits of the hyperexponentiation (tetration) of ``base`` by ``height``."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 570 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Convert a list of datasets with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
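# Usage sketch for the two helpers above, via the public entry points they back
# (toy data chosen for the example):
#   from datasets import Dataset, concatenate_datasets, interleave_datasets
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   concatenate_datasets([d1, d2])["a"]   # [0, 1, 2, 10, 11, 12]
#   interleave_datasets([d1, d2])["a"]    # [0, 10, 1, 11, 2, 12]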
| 105 |
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways to make ``pence`` pence using standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
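# Small illustration: solution(5) == 4, via {5}, {2+2+1}, {2+1+1+1} and {1+1+1+1+1}.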
if __name__ == "__main__":
assert solution(200) == 73_682
| 180 | 0 |
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Determine if the string ``s`` is a palindrome using two converging pointers."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    """Determine if the string ``s`` is a palindrome by comparing mirrored halves."""
    end = len(s) // 2
    n = len(s)
    # We only need to traverse half of the string,
    # since we can access the i-th last element through the i-th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    """Determine if the string ``s`` is a palindrome using recursion."""
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    """Determine if the string ``s`` is a palindrome by comparing it with its reverse slice."""
    return s == s[::-1]
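# Design note: the slice version wins the benchmarks below because the reversal and
# comparison both run in C inside CPython, while the other variants pay per-character
# Python-level overhead (and the recursive one also pays for call frames and copies).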
def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"""{key:21} {value}""")
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 711 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
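# For reference (illustrative call): catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]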
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
            N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 279 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
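# Typical wiring inside a model test class (a sketch; BertConfig and hidden_size=37
# are only examples of what a caller might pass):
#   self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#   self.config_tester.run_common_tests()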
| 385 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 385 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
snake_case__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
snake_case__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
snake_case__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class lowerCAmelCase_ ( datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare(self, dl_manager):
"""simple docstring"""
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 718 |
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class lowerCAmelCase_ ( datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 373 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32)
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False)
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCAmelCase_ ( self , A_ , A_=0 )-> Optional[int]:
'''simple docstring'''
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) == 256
UpperCamelCase = audio[:10]
UpperCamelCase = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
        UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs['prompt']]
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs.pop('prompt' )]
UpperCamelCase = audioldm_pipe.tokenizer(
A_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = text_inputs['input_ids'].to(A_ )
UpperCamelCase = audioldm_pipe.text_encoder(
A_ , )
UpperCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase = F.normalize(A_ , dim=-1 )
UpperCamelCase = prompt_embeds
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
        UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * ['this is a negative prompt']
UpperCamelCase = negative_prompt
UpperCamelCase = 3 * [inputs['prompt']]
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs.pop('prompt' )]
UpperCamelCase = []
for p in [prompt, negative_prompt]:
UpperCamelCase = audioldm_pipe.tokenizer(
A_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = text_inputs['input_ids'].to(A_ )
UpperCamelCase = audioldm_pipe.text_encoder(
A_ , )
UpperCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase = F.normalize(A_ , dim=-1 )
embeds.append(A_ )
UpperCamelCase , UpperCamelCase = embeds
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ )
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 'egg cracking'
UpperCamelCase = audioldm_pipe(**A_ , negative_prompt=A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) == 256
UpperCamelCase = audio[:10]
UpperCamelCase = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ )
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=2 , num_waveforms_per_prompt=A_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=A_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = audioldm_pipe.vocoder.config.sampling_rate
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) / vocoder_sampling_rate == 0.016
UpperCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) / vocoder_sampling_rate == 0.032
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = ['hey']
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=1 )
UpperCamelCase = output.audios.shape
assert audio_shape == (1, 256)
UpperCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCamelCase = SpeechTaHifiGan(A_ ).to(A_ )
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=1 )
UpperCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) )
UpperCamelCase = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
UpperCamelCase = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_inputs(A_ )
UpperCamelCase = 25
UpperCamelCase = audioldm_pipe(**A_ ).audios[0]
assert audio.ndim == 1
assert len(A_ ) == 81920
UpperCamelCase = audio[77230:77240]
UpperCamelCase = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
UpperCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_inputs(A_ )
UpperCamelCase = audioldm_pipe(**A_ ).audios[0]
assert audio.ndim == 1
assert len(A_ ) == 81920
UpperCamelCase = audio[27780:27790]
UpperCamelCase = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
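# A minimal end-to-end sketch of the pipeline the slow tests above exercise. The
# checkpoint "cvssp/audioldm" and the audio_length_in_s kwarg both appear in the
# tests; the 16 kHz write rate and the use of scipy are assumptions for illustration.
import scipy.io.wavfile
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
audio = pipe(
    "A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.12
).audios[0]
scipy.io.wavfile.write("hammer.wav", rate=16_000, data=audio)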
| 3 | import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def _a ( self ):
lowerCAmelCase_: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__ , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase__ , "num_attention_heads" ) )
class _lowercase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=64 , lowerCamelCase__=3 , lowerCamelCase__=3 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=16 , lowerCamelCase__=[128, 256, 384] , lowerCamelCase__=[4, 6, 8] , lowerCamelCase__=[2, 3, 4] , lowerCamelCase__=[16, 16, 16] , lowerCamelCase__=0 , lowerCamelCase__=[2, 2, 2] , lowerCamelCase__=[2, 2, 2] , lowerCamelCase__=0.0_2 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=2 , ):
lowerCAmelCase_: int = parent
lowerCAmelCase_: Tuple = batch_size
lowerCAmelCase_: List[str] = image_size
lowerCAmelCase_: Tuple = num_channels
lowerCAmelCase_: Optional[int] = kernel_size
lowerCAmelCase_: int = stride
lowerCAmelCase_: Optional[int] = padding
lowerCAmelCase_: Tuple = hidden_sizes
lowerCAmelCase_: Union[str, Any] = num_attention_heads
lowerCAmelCase_: Tuple = depths
lowerCAmelCase_: Optional[int] = key_dim
lowerCAmelCase_: Optional[Any] = drop_path_rate
lowerCAmelCase_: List[str] = patch_size
lowerCAmelCase_: Any = attention_ratio
lowerCAmelCase_: Tuple = mlp_ratio
lowerCAmelCase_: Any = initializer_range
lowerCAmelCase_: List[str] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowerCAmelCase_: Any = is_training
lowerCAmelCase_: Optional[int] = use_labels
lowerCAmelCase_: Dict = num_labels
lowerCAmelCase_: Any = initializer_range
def _a ( self ):
lowerCAmelCase_: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_: Tuple = None
if self.use_labels:
lowerCAmelCase_: int = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_: Tuple = self.get_config()
return config, pixel_values, labels
def _a ( self ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: int = LevitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase_: Optional[Any] = model(lowerCamelCase__ )
lowerCAmelCase_: Tuple = (self.image_size, self.image_size)
lowerCAmelCase_ , lowerCAmelCase_: Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
lowerCAmelCase_: Union[str, Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowerCAmelCase_: str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: Tuple = self.num_labels
lowerCAmelCase_: str = LevitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase_: Optional[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ):
lowerCAmelCase_: Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_: Dict = config_and_inputs
lowerCAmelCase_: Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: Any = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE: int = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE: Optional[int] = False
SCREAMING_SNAKE_CASE: Optional[Any] = False
SCREAMING_SNAKE_CASE: str = False
SCREAMING_SNAKE_CASE: str = False
SCREAMING_SNAKE_CASE: List[Any] = False
def _a ( self ):
lowerCAmelCase_: List[str] = LevitModelTester(self )
lowerCAmelCase_: Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _a ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _a ( self ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _a ( self ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _a ( self ):
pass
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_: List[str] = model_class(lowerCamelCase__ )
lowerCAmelCase_: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_: Any = [*signature.parameters.keys()]
lowerCAmelCase_: str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _a ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: int = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_: Dict = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
lowerCAmelCase_: str = outputs.hidden_states
lowerCAmelCase_: List[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
lowerCAmelCase_: List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
lowerCAmelCase_ , lowerCAmelCase_: int = image_size[0], image_size[1]
for _ in range(4 ):
lowerCAmelCase_: Tuple = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowerCAmelCase_: Optional[int] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowerCAmelCase_ , lowerCAmelCase_: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_: int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_: Union[str, Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _a ( self ):
pass
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
lowerCAmelCase_: List[str] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self ):
lowerCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _a ( self ):
if not self.model_tester.is_training:
return
lowerCAmelCase_ , lowerCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_: int = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowerCAmelCase_: Tuple = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
lowerCAmelCase_: Optional[int] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
lowerCAmelCase_: Tuple = model(**lowerCamelCase__ ).loss
loss.backward()
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase_: Any = False
lowerCAmelCase_: Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowerCAmelCase_: Any = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
lowerCAmelCase_: str = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
lowerCAmelCase_: List[str] = model(**lowerCamelCase__ ).loss
loss.backward()
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_: Optional[int] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowerCAmelCase_: Tuple = problem_type["title"]
lowerCAmelCase_: Optional[Any] = problem_type["num_labels"]
lowerCAmelCase_: List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
lowerCAmelCase_: List[Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if problem_type["num_labels"] > 1:
lowerCAmelCase_: List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
lowerCAmelCase_: List[Any] = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list:
lowerCAmelCase_: Dict = model(**lowerCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def _a ( self ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_: Optional[int] = LevitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( ):
lowerCAmelCase_: Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _a ( self ):
lowerCAmelCase_: Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase__ )
lowerCAmelCase_: str = self.default_image_processor
lowerCAmelCase_: int = prepare_img()
lowerCAmelCase_: Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_: Tuple = model(**lowerCamelCase__ )
# verify the logits
lowerCAmelCase_: str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
lowerCAmelCase_: Union[str, Any] = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) | 613 | 0 |
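# A short inference sketch mirroring the integration test above. The tests resolve
# the checkpoint from LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST; hard-coding
# "facebook/levit-128S" here is an assumption made for illustration.
import torch
from PIL import Image
from transformers import LevitForImageClassificationWithTeacher, LevitImageProcessor

processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])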
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
lowercase : List[Any] = CanineTokenizer
lowercase : str = False
def a__ ( self :Optional[Any] ):
super().setUp()
snake_case_ : int = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self :Optional[int] ):
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def a__ ( self :int ,**_UpperCamelCase :Dict ):
snake_case_ : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname ,**_UpperCamelCase )
snake_case_ : Optional[Any] = 1_0_2_4
return tokenizer
@require_torch
def a__ ( self :List[str] ):
snake_case_ : Union[str, Any] = self.canine_tokenizer
snake_case_ : Optional[int] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
snake_case_ : Dict = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
snake_case_ : Any = tokenizer(_UpperCamelCase ,padding=_UpperCamelCase ,return_tensors="""pt""" )
self.assertIsInstance(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
self.assertEqual((2, 3_9) ,batch.input_ids.shape )
self.assertEqual((2, 3_9) ,batch.attention_mask.shape )
@require_torch
def a__ ( self :int ):
snake_case_ : Union[str, Any] = self.canine_tokenizer
snake_case_ : Dict = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
snake_case_ : int = tokenizer(_UpperCamelCase ,padding=_UpperCamelCase ,return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" ,_UpperCamelCase )
self.assertIn("""attention_mask""" ,_UpperCamelCase )
self.assertIn("""token_type_ids""" ,_UpperCamelCase )
@require_torch
def a__ ( self :Tuple ):
snake_case_ : Union[str, Any] = self.canine_tokenizer
snake_case_ : Tuple = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
snake_case_ : Optional[int] = tokenizer(
text_target=_UpperCamelCase ,max_length=3_2 ,padding="""max_length""" ,truncation=_UpperCamelCase ,return_tensors="""pt""" )
self.assertEqual(3_2 ,targets["""input_ids"""].shape[1] )
def a__ ( self :List[str] ):
# safety check on max_len default value so we are sure the test works
snake_case_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length ,4_2 )
# Now let's start the test
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ : Dict = tempfile.mkdtemp()
snake_case_ : Optional[int] = """ He is very happy, UNwant\u00E9d,running"""
snake_case_ : Optional[Any] = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
snake_case_ : Union[str, Any] = tokenizer.__class__.from_pretrained(_UpperCamelCase )
snake_case_ : Dict = after_tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
shutil.rmtree(_UpperCamelCase )
snake_case_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ : List[Any] = tempfile.mkdtemp()
snake_case_ : List[str] = """ He is very happy, UNwant\u00E9d,running"""
snake_case_ : Any = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
snake_case_ : List[Any] = chr(0xE_0_0_7 )
additional_special_tokens.append(_UpperCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
snake_case_ : Tuple = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
snake_case_ : Optional[Any] = tokenizer.__class__.from_pretrained(_UpperCamelCase )
snake_case_ : Tuple = after_tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
self.assertIn(_UpperCamelCase ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,4_2 )
snake_case_ : str = tokenizer.__class__.from_pretrained(_UpperCamelCase ,model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length ,4_3 )
shutil.rmtree(_UpperCamelCase )
def a__ ( self :Union[str, Any] ):
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ , snake_case_ : List[str] = self.get_clean_sequence(_UpperCamelCase )
# a special token for Canine can be defined as follows:
snake_case_ : Optional[int] = 0xE_0_0_5
snake_case_ : int = chr(_UpperCamelCase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : str = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
self.assertEqual(len(_UpperCamelCase ) ,1 )
snake_case_ : List[str] = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=_UpperCamelCase )
snake_case_ : Any = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
snake_case_ : List[str] = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
snake_case_ : Optional[Any] = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
self.assertEqual(_UpperCamelCase ,input_encoded + special_token_id )
snake_case_ : Optional[Any] = tokenizer.decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase )
self.assertTrue(special_token not in decoded )
def a__ ( self :Any ):
snake_case_ : str = self.get_tokenizers(do_lower_case=_UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Optional[Any] = chr(0xE_0_0_5 )
snake_case_ : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=_UpperCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
snake_case_ : Optional[int] = tokenizer.tokenize(_UpperCamelCase )
snake_case_ : int = tokenizer.tokenize(_UpperCamelCase )
self.assertEqual(len(_UpperCamelCase ) ,1 )
self.assertEqual(len(_UpperCamelCase ) ,1 )
self.assertEqual(token_a[0] ,_UpperCamelCase )
self.assertEqual(token_a[0] ,_UpperCamelCase )
@require_tokenizers
def a__ ( self :int ):
snake_case_ : Tuple = self.get_tokenizers(do_lower_case=_UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
snake_case_ : Tuple = 0xE_0_0_6
snake_case_ : str = chr(_UpperCamelCase )
snake_case_ : List[Any] = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_UpperCamelCase )
tokenizer.from_pretrained(_UpperCamelCase )
def a__ ( self :int ):
snake_case_ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase ,"""special_tokens_map.json""" ) ,encoding="""utf-8""" ) as json_file:
snake_case_ : Dict = json.load(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase ,"""tokenizer_config.json""" ) ,encoding="""utf-8""" ) as json_file:
snake_case_ : Optional[int] = json.load(_UpperCamelCase )
# a special token for Canine can be defined as follows:
snake_case_ : int = 0xE_0_0_6
snake_case_ : List[str] = chr(_UpperCamelCase )
snake_case_ : Union[str, Any] = [new_token_a]
snake_case_ : List[str] = [new_token_a]
with open(os.path.join(_UpperCamelCase ,"""special_tokens_map.json""" ) ,"""w""" ,encoding="""utf-8""" ) as outfile:
json.dump(_UpperCamelCase ,_UpperCamelCase )
with open(os.path.join(_UpperCamelCase ,"""tokenizer_config.json""" ) ,"""w""" ,encoding="""utf-8""" ) as outfile:
json.dump(_UpperCamelCase ,_UpperCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case_ : List[str] = tokenizer_class.from_pretrained(_UpperCamelCase ,extra_ids=0 )
self.assertIn(_UpperCamelCase ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
snake_case_ : Dict = 0xE_0_0_7
snake_case_ : Optional[int] = chr(_UpperCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case_ : Optional[int] = [AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase )]
snake_case_ : List[Any] = tokenizer_class.from_pretrained(
_UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,extra_ids=0 )
self.assertIn(_UpperCamelCase ,tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def a__ ( self :List[str] ):
snake_case_ : Union[str, Any] = self.get_tokenizers(do_lower_case=_UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Any = """hello world"""
if self.space_between_special_tokens:
snake_case_ : int = """[CLS] hello world [SEP]"""
else:
snake_case_ : List[str] = input
snake_case_ : Any = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
snake_case_ : Union[str, Any] = tokenizer.decode(_UpperCamelCase ,spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_UpperCamelCase ,[output, output.lower()] )
def a__ ( self :Dict ):
snake_case_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
snake_case_ : Tuple = """a"""
snake_case_ : Optional[Any] = ord(_UpperCamelCase )
for attr in attributes_list:
setattr(_UpperCamelCase ,attr + """_id""" ,_UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase ,_UpperCamelCase ) ,_UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase ,attr + """_id""" ) ,_UpperCamelCase )
setattr(_UpperCamelCase ,attr + """_id""" ,_UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase ,_UpperCamelCase ) ,_UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase ,attr + """_id""" ) ,_UpperCamelCase )
setattr(_UpperCamelCase ,"""additional_special_tokens_ids""" ,[] )
self.assertListEqual(getattr(_UpperCamelCase ,"""additional_special_tokens""" ) ,[] )
self.assertListEqual(getattr(_UpperCamelCase ,"""additional_special_tokens_ids""" ) ,[] )
snake_case_ : Optional[int] = 0xE_0_0_6
snake_case_ : Union[str, Any] = chr(_UpperCamelCase )
setattr(_UpperCamelCase ,"""additional_special_tokens_ids""" ,[additional_special_token_id] )
self.assertListEqual(getattr(_UpperCamelCase ,"""additional_special_tokens""" ) ,[additional_special_token] )
self.assertListEqual(getattr(_UpperCamelCase ,"""additional_special_tokens_ids""" ) ,[additional_special_token_id] )
def a__ ( self :Dict ):
pass
def a__ ( self :Tuple ):
pass
def a__ ( self :Tuple ):
pass
def a__ ( self :Any ):
pass
def a__ ( self :List[Any] ):
pass
def a__ ( self :Optional[Any] ):
pass
def a__ ( self :Dict ):
pass
def a__ ( self :Optional[int] ):
pass | 267 |
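# CANINE tokenizes at the character level: every input id is simply the Unicode code
# point of a character, with special tokens drawn from the private-use area
# ([CLS] = 0xE000 = 57344 and [SEP] = 0xE001 = 57345, as the batch test above
# expects). A minimal sketch of that behaviour:
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
encoding = tokenizer("hi")
print(encoding["input_ids"])  # [57344, 104, 105, 57345] -> [CLS] 'h' 'i' [SEP]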
'''Strassen matrix multiplication, implemented with plain Python lists.'''
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Direct multiplication, used as the 2x2 recursion base case."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication for square power-of-two matrices."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # the seven Strassen products
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
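# Complexity note: the recursion above performs seven half-size multiplications plus
# a constant number of O(n^2) additions, giving T(n) = 7*T(n/2) + O(n^2) and hence
# O(n^log2(7)) ≈ O(n^2.81), versus O(n^3) for the classical algorithm.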
def strassen(matrix1: list, matrix2: list) -> list:
    """Pad the inputs up to a square power-of-two size, multiply, then trim."""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2)) | 267 | 1 |
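# A quick cross-check of the implementation above against NumPy (a dependency
# assumed only for this sanity test, not used by the module itself). Note that
# strassen() pads its arguments in place, so the expected product is computed first.
import numpy as np

lhs = [[2, 3, 4, 5], [6, 4, 3, 1]]
rhs = [[0, 2], [16, 2], [2, 2], [13, 11]]
expected = np.matmul(lhs, rhs).tolist()
assert strassen(lhs, rhs) == expected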
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
class a__ ( __snake_case ):
A__ : Tuple = ['input_values', 'padding_mask']
def __init__( self , UpperCAmelCase = 1 , UpperCAmelCase = 2_4_0_0_0 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> Tuple:
super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase )
__a = chunk_length_s
__a = overlap
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
__a = True
__a = bool(
isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
__a = [np.asarray(UpperCAmelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ):
__a = np.asarray(UpperCAmelCase , dtype=np.floataa )
elif isinstance(UpperCAmelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__a = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__a = [np.asarray(UpperCAmelCase ).T]
# verify inputs are valid
for idx, example in enumerate(UpperCAmelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
__a = None
__a = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__a = min(array.shape[0] for array in raw_audio )
__a = int(np.floor(max_length / self.chunk_stride ) )
__a = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__a = max(array.shape[0] for array in raw_audio )
__a = int(np.ceil(max_length / self.chunk_stride ) )
__a = (nb_step - 1) * self.chunk_stride + self.chunk_length
__a = 'max_length'
else:
__a = input_values
# normal padding on batch
if padded_inputs is None:
__a = self.pad(
UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , padding=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
if padding:
__a = padded_inputs.pop('attention_mask' )
__a = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
__a = example[..., None]
input_values.append(example.T )
__a = input_values
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(UpperCAmelCase )
return padded_inputs
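# A usage sketch for this extractor's API, shown via the upstream class it mirrors
# (transformers' EncodecFeatureExtractor); the checkpoint name is an assumption
# made for illustration.
import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor.from_pretrained("facebook/encodec_24khz")
waveform = np.zeros(24_000, dtype=np.float32)  # one second of silence at 24 kHz
features = extractor(waveform, sampling_rate=24_000, return_tensors="pt")
print(features["input_values"].shape)  # (batch, channels, num_samples)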
| 559 | import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    scaler = MinMaxScaler()
    actual_data = scaler.fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    predictions = model.predict(x_test)
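    # Evaluation sketch added for illustration: RMSE over the normalized test windows,
    # plus one predicted window mapped back to the original price scale via the scaler
    # kept above. Both steps are assumptions, not part of the original script.
    rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
    print(f"normalized RMSE over the test horizon: {rmse:.4f}")
    print(scaler.inverse_transform(predictions[0].reshape(-1, 1)).ravel())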
| 559 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : List[str] = '''▁'''
lowercase__ : Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowercase__ : Optional[Any] = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
lowercase__ : str = {
'''facebook/xglm-564M''': 20_48,
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : str = VOCAB_FILES_NAMES
_lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : str , lowercase_ : str , lowercase_ : Any="<s>" , lowercase_ : Any="</s>" , lowercase_ : Dict="</s>" , lowercase_ : List[str]="<s>" , lowercase_ : Tuple="<unk>" , lowercase_ : int="<pad>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : str , ):
snake_case_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case_ : Tuple = 7
snake_case_ : Tuple = [f"<madeupword{i}>" for i in range(self.num_madeup_words )]
snake_case_ : Union[str, Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
snake_case_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
snake_case_ : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ : Optional[Any] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ : int = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
snake_case_ : List[str] = len(self.sp_model )
snake_case_ : Any = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowercase_ )
snake_case_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
snake_case_ : Dict = self.__dict__.copy()
snake_case_ : Tuple = None
snake_case_ : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Any , lowercase_ : List[Any] ):
snake_case_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ : Any = {}
snake_case_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case_ : Dict = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _snake_case ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ ))
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ ))
def _snake_case ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ : Union[str, Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _snake_case ( self : Dict ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Union[str, Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self : List[str] , lowercase_ : str ):
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def _snake_case ( self : int , lowercase_ : str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ : List[Any] = self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self : Any , lowercase_ : int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self : Tuple , lowercase_ : Union[str, Any] ):
snake_case_ : str = ''''''.join(lowercase_ ).replace(lowercase_ , ''' ''' ).strip()
return out_string
def _snake_case ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ : Union[str, Any] = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , '''wb''' ) as fi:
snake_case_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
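# A short usage sketch. As build_inputs_with_special_tokens above shows, XGLM
# prepends the </s> separator rather than appending an EOS token; the checkpoint
# name is taken from the pretrained vocab map at the top of the file.
from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
input_ids = tokenizer("Hello world").input_ids
assert input_ids[0] == tokenizer.sep_token_id  # every sequence starts with </s>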
| 718 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : Tuple = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
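
# Minimal usage sketch (illustrative, not part of the original file): the defaults above
# reproduce the roberta-base architecture, and the ONNX config only varies its dynamic
# axes by task:
#   config = RobertaConfig()
#   onnx_config = RobertaOnnxConfig(config, task="sequence-classification")
#   list(onnx_config.inputs)   # ['input_ids', 'attention_mask']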
| 485 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
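
# Usage sketch (checkpoint name assumed for illustration): one call produces both the text
# features (input_ids, attention_mask) and the image features (pixel_values, pixel_mask):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = processor(images=image, text="a photo of two cats", return_tensors="pt")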
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 225 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim)
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
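    # The two checks below verify incremental decoding: a single forward pass over the whole
    # sequence must match priming the cache with all-but-the-last token and then feeding only
    # the final token together with past_key_values. Only the last 5 logits are compared,
    # within an absolute tolerance of 1e-3.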
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids)
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids)
        outputs = model(input_ids)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids)
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids)
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask)
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                        self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                    with torch.no_grad():
                        pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                    self.assertEqual(
                        len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
                    for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                        self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 720 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
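#
# For reference, the cartesian product above is just itertools.product over the dimensions
# (a minimal sketch of what main() does when it splits each dimension on '|'):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']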
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)"""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
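
# Note: Tee forwards every other attribute lookup to the real stdout via __getattr__, so
# code that probes sys.stdout (e.g. tqdm checking isatty or encoding) keeps working unchanged.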
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped to `max_width` chars per line."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    """Build the base command list, with a clean --output_dir pointing at `output_dir`."""
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark sub-process and pull the requested metrics out of all_results.json."""
    # enable the next block to debug this function without actually running the benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])})

    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
    """Run one variation `repeat_times` times and report the averaged metrics."""
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    """Return a report string with the software/hardware setup of the current environment."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Turn the per-variation metrics into github- and console-friendly markdown tables."""
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0, axis="columns")

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'")
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against")
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second")
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'")
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported")
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked")
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress")
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose))

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
| 91 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ])

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5)
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}])

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5)
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ])

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5)
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ])

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5)
| 353 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 353 | 1 |
'''simple docstring'''
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # NOTE: the module attribute paths below are reconstructed to follow diffusers' reference
    # conversion script for this pipeline; double-check them against the installed version.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"]))
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T))

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"]))

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate)

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
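
# Example invocation (script name and paths are illustrative, not from the original file):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./music_spectrogram_diffusion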
| 368 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
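    # Worked example (hypothetical numbers): with size={"shortest_edge": 18}, a 30x40 (w x h)
    # PIL image keeps its aspect ratio, so get_expected_values returns
    # (int(18 * 40 / 30), 18) == (24, 18).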
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 368 | 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 4 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
__UpperCamelCase : Optional[Any] = tuple[int, int]
class a :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = vertices
lowerCAmelCase = {
(min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items()
}
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase = weight
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Graph({min(self.vertices )} , {} )
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase = edge
lowerCAmelCase = weight
subgraph.add_edge(_snake_case , _snake_case )
return subgraph
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ):
lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = {}
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
with open(_UpperCAmelCase ) as f:
lowerCAmelCase = f.read().strip().split('\n' )
lowerCAmelCase = [line.split(',' ) for line in data]
for edgea in range(1 , len(_UpperCAmelCase ) ):
for edgea in range(_UpperCAmelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] )
lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase )
lowerCAmelCase = graph.prims_algorithm()
lowerCAmelCase = sum(graph.edges.values() )
lowerCAmelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
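    # Added sketch (not part of the original solution): a quick sanity check of
    # prims_algorithm on a hand-built triangle graph. The MST keeps the two
    # cheapest edges, so the saving over the full edge set is 5.
    demo_graph = Graph({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 1})
    demo_mst = demo_graph.prims_algorithm()
    assert sum(demo_graph.edges.values()) - sum(demo_mst.edges.values()) == 5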
| 4 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(SCREAMING_SNAKE_CASE )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
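

# Illustrative usage (added sketch, not part of the original module): with
# RUN_SLOW=yes exported in the shell the flag parses truthy, while unset keys
# fall back to `default` (strtobool accepts y/yes/t/true/on/1 and negations).
#
#   os.environ["RUN_SLOW"] = "yes"
#   assert parse_flag_from_env("RUN_SLOW", default=False)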
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
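

# Illustrative usage (added sketch): simulating a network outage inside a
# test. `requests.get` here is ordinary requests usage, not part of this
# module.
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       with pytest.raises(requests.exceptions.ConnectionError):
#           requests.get("https://huggingface.co")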
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
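

# Illustrative usage (added sketch): running a one-liner worker and reading
# its captured stdout lines. `sys.executable` points at the current
# interpreter.
#
#   result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
#   assert result.returncode == 0 and result.stdout[0] == "ok"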
| 494 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCAmelCase ( snake_case , unittest.TestCase ):
lowerCAmelCase__ = RoFormerTokenizer
lowerCAmelCase__ = RoFormerTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def __A ( self ):
super().setUp()
def __A ( self , **a__ ):
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a__ )
def __A ( self , **a__ ):
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a__ )
def __A ( self ):
_UpperCAmelCase = '永和服装饰品有限公司,今天天气非常好'
_UpperCAmelCase = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def __A ( self ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase , _UpperCAmelCase = self.get_chinese_input_output_texts()
_UpperCAmelCase = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , output_text.split() )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase , _UpperCAmelCase = self.get_chinese_input_output_texts()
_UpperCAmelCase = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , output_text.split() )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
pass
| 494 | 1 |
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    # Backtracking: extend the current combination, recurse, then undo.
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
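    # Added sketch (not in the original script): C(4, 2) has 6 combinations
    # and the backtracking above emits them in lexicographic order.
    assert total_list == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]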
| 135 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
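

# Illustrative usage (added sketch; "spiece.model" below is a hypothetical
# local SentencePiece file, not shipped with this module):
#
#   tokenizer = AlbertTokenizer("spiece.model")
#   pieces = tokenizer.tokenize("albert is a lite bert")
#   ids = tokenizer.convert_tokens_to_ids(pieces)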
| 135 | 1 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    # Recursive modular exponentiation by repeated squaring.
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # Build the power tower base^base^... of the given height, keeping only
    # the last `digits` digits at every step.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 705 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
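
# Design note (added): _LazyModule replaces this module in sys.modules, so an
# import like the one below only triggers the heavy torch/modeling imports on
# first attribute access:
#
#   from transformers.models.luke import LukeModel  # resolved lazily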
| 634 | 0 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> Tuple:
_UpperCAmelCase = 3
_UpperCAmelCase = 250
_UpperCAmelCase = ids_tensor((batch_size, length) , snake_case )
_UpperCAmelCase = torch.ones((batch_size, length) , device=snake_case , dtype=torch.float ) / length
return input_ids, scores
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(5 )
_UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(snake_case , snake_case ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(snake_case , snake_case ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(snake_case , snake_case ) )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = MaxLengthCriteria(max_length=10 )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(snake_case , snake_case ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(snake_case , snake_case ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(snake_case , snake_case ) )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(snake_case , snake_case ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(snake_case , snake_case ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(snake_case , snake_case ) )
_UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(5 )
_UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(snake_case , snake_case ) )
_UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(snake_case , snake_case ) )
def lowerCamelCase_ ( self ) -> List[str]:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(snake_case ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(snake_case ) , 1 )
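

# Illustrative sketch (added, hypothetical class, not a transformers API): a
# custom criterion only needs to be callable as criterion(input_ids, scores)
# and return a bool.
#
#   from transformers.generation import StoppingCriteria
#
#   class StopOnTokenCriteria(StoppingCriteria):
#       def __init__(self, stop_token_id):
#           self.stop_token_id = stop_token_id
#
#       def __call__(self, input_ids, scores, **kwargs):
#           return bool((input_ids[:, -1] == self.stop_token_id).all())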
| 573 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowercase = logging.getLogger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''masked_bert'''
def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=0 , snake_case="topK" , snake_case="constant" , snake_case=0.0 , **snake_case , ) -> str:
super().__init__(pad_token_id=snake_case , **snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = pruning_method
_UpperCAmelCase = mask_init
_UpperCAmelCase = mask_scale
| 573 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __A (datasets.BeamBasedBuilder):
'''simple docstring'''
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=UpperCAmelCase_ , )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any ) ->Tuple:
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(UpperCAmelCase_ )
class __A (datasets.BeamBasedBuilder):
'''simple docstring'''
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=UpperCAmelCase_ , )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ) ->List[Any]:
"""simple docstring"""
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(UpperCAmelCase_ )
def _a ( ) -> Dict:
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def _a ( ) -> int:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class __A (snake_case__):
'''simple docstring'''
@require_beam
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ = DummyBeamDataset(cache_dir=UpperCAmelCase_ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCAmelCase_ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
snake_case_ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , UpperCAmelCase_ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , UpperCAmelCase_ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCAmelCase_ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def lowerCAmelCase ( self : List[str] ) ->Optional[int]:
"""simple docstring"""
import apache_beam as beam
snake_case_ = beam.io.parquetio.WriteToParquet
snake_case_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ = DummyBeamDataset(cache_dir=UpperCAmelCase_ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
snake_case_ = partial(UpperCAmelCase_ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
UpperCAmelCase_ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
UpperCAmelCase_ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
snake_case_ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , UpperCAmelCase_ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , UpperCAmelCase_ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(UpperCAmelCase_ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ = DummyBeamDataset(cache_dir=UpperCAmelCase_ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
snake_case_ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ = NestedBeamDataset(cache_dir=UpperCAmelCase_ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCAmelCase_ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
snake_case_ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , UpperCAmelCase_ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , UpperCAmelCase_ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCAmelCase_ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 2 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = 0
snake_case_ = len(_SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
snake_case_ = i + 1
else:
snake_case_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 2 | 1 |
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}

COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
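    # Added round-trip check (not part of the original solution): XOR is its
    # own inverse, so encrypting with a key and decoding with try_key using
    # the same key recovers the plaintext.
    demo_key = (ord("a"), ord("b"), ord("c"))
    demo_cipher = [char ^ key for char, key in zip(b"the cat", cycle(demo_key))]
    assert try_key(demo_key, demo_cipher) == "the cat"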
| 382 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : list[list[int | float]] ) -> int:
lowerCamelCase_ = len(_lowerCamelCase )
lowerCamelCase_ = len(matrix[0] )
lowerCamelCase_ = min(_lowerCamelCase , _lowerCamelCase )
for row in range(_lowerCamelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _lowerCamelCase ):
lowerCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(_lowerCamelCase , _lowerCamelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
lowerCamelCase_ = True
for i in range(row + 1 , _lowerCamelCase ):
if matrix[i][row] != 0:
lowerCamelCase_ , lowerCamelCase_ = matrix[i], matrix[row]
lowerCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(_lowerCamelCase ):
lowerCamelCase_ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
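    # Added check (not in the original file): the second row is a scalar
    # multiple of the first, so elimination leaves a single independent row.
    assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1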
| 549 | 0 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 9 |
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built with the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model's unk_id can only be set by round-tripping the
        # tokenizer through its JSON representation.
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
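

# Illustrative usage (added sketch): training on a tiny in-memory corpus. The
# corpus and vocab_size here are made-up values for demonstration only.
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=60)
#   print(tokenizer.encode("hello world").tokens)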
| 9 | 1 |
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """A convolution block bundling a Conv2d with batch norm and ReLU."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)

        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM): pools at several scales and upsamples back."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head: a PPM plus an FPN-style top-down path."""

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
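

# Shape walkthrough (added note, assuming a typical 4-level backbone with
# strides 4/8/16/32): psp_forward pools the coarsest map at each scale in
# pool_scales, upsamples the pooled maps back to the stride-32 grid and fuses
# them through `bottleneck`; the top-down loop then upsamples every FPN level
# to the stride-4 grid before `fpn_bottleneck` and the 1x1 `classifier`, so
# the head's logits come out at 1/4 of the input resolution.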
class UperNetFCNHead(nn.Module ):
    def __init__( self , config , in_index : int = 2 , kernel_size : int = 3 , dilation : Union[int, Tuple[int, int]] = 1 ):
        '''simple docstring'''
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )

        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights( self ):
'''simple docstring'''
self.apply(self._init_weights )
    def _init_weights( self , module ):
        '''simple docstring'''
        if isinstance(module , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
    def forward( self , encoder_hidden_states : torch.Tensor ):
        '''simple docstring'''
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
return output
class UperNetPreTrainedModel(PreTrainedModel ):
    config_class = UperNetConfig
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        '''simple docstring'''
        if isinstance(module , UperNetPreTrainedModel ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
    def init_weights( self ):
'''simple docstring'''
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing( self , module , value=False ):
        '''simple docstring'''
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel ):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )

        self.backbone = AutoBackbone.from_config(config.backbone_config )

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values : Optional[torch.Tensor] = None , output_attentions : Optional[bool] = None , output_hidden_states : Optional[bool] = None , labels : Optional[torch.Tensor] = None , return_dict : Optional[bool] = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps

        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
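# --- Editor's usage sketch (not part of the original file) ---
# Minimal inference example; the checkpoint name below is illustrative and
# assumed to exist on the Hugging Face Hub, it is not taken from this file:
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch, num_labels, height, width)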
| 75 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ) -> Optional[Any]:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> Optional[int]:
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> List[Any]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "<eod>" )
        self.assertEqual(len(vocab_keys ) , 1_0_0_6 )
    def test_vocab_size( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
    def test_full_tokenizer( self ) -> Tuple:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_tokenizer_lower( self ) -> str:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
    def test_tokenizer_no_lower( self ) -> Optional[int]:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=False )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
    def test_sequence_builders( self ) -> List[str]:
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased" )

        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
    def test_tokenizer_integration( self ) -> int:
# fmt: off
_SCREAMING_SNAKE_CASE : List[Any] = {"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__lowerCamelCase , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 249 | 0 |
def knapsack( weights :list , values :list , number_of_items :int , max_weight :int , index :int )-> int:
    if index == number_of_items:
        return 0
    ans_without = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without , ans_with )
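# Editor's usage sketch (illustrative values, not from the original file):
# knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) returns 13: the items of
# weight 1 and 4 (values 5 + 8) fill the capacity of 5 optimally.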
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = 'data2vec-text'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig ):
    """simple docstring"""

    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
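# Editor's usage sketch (assumes the standard OnnxConfig constructor of the
# surrounding library; names above are from this file):
#   onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task="multiple-choice")
#   onnx_config.inputs  # OrderedDict with batch/choice/sequence dynamic axes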
| 185 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("""fixtures""")
class ImageProcessorUtilTester(unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down( self ) -> str:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''', return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ) -> Optional[int]:
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
    def test_image_processor_from_pretrained_subfolder( self ) -> Any:
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )

        config = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''', subfolder='''feature_extractor''' )
        self.assertIsNotNone(config )
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase ):
@classmethod
    def setUpClass( cls ) -> Dict:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ) -> Dict:
try:
delete_repo(token=cls._token, repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
    def test_push_to_hub( self ) -> Optional[Any]:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-image-processor''', use_auth_token=self._token )

        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )

        # Reset repo
        delete_repo(token=self._token, repo_id='''test-image-processor''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='''test-image-processor''', push_to_hub=True, use_auth_token=self._token )

        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )
    def test_push_to_hub_in_organization( self ) -> Any:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''valid_org/test-image-processor''', use_auth_token=self._token )

        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )

        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-image-processor''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-image-processor-org''', push_to_hub=True, use_auth_token=self._token )

        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )
    def test_push_to_hub_dynamic_image_processor( self ) -> int:
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )

        image_processor.push_to_hub('''test-dynamic-image-processor''', use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''}, )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor', trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, '''CustomImageProcessor''' )
| 344 |
def decimal_to_binary( num )-> str:
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e ) for e in binary )

    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 628 | 0 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters( state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict( state_dict , codebook_state_dict ):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
        key = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
        key = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
        key = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
        key = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
        key = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
        key = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
        key = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
        key = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
        key = key.replace('image_encoder.module' , 'flava.image_model' )
        key = key.replace('text_encoder.module' , 'flava.text_model' )
        key = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
        key = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
        key = key.replace('text_projection' , 'flava.text_projection' )
        key = key.replace('image_projection' , 'flava.image_projection' )

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f'image_codebook.{key}'] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint( checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config ).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )

    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location='cpu' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location='cpu' )

    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )

    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )

    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
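# Editor's note: example invocation (the script name and paths below are
# placeholders, not taken from this file):
#   python convert_flava_original_checkpoint_to_pytorch.py \
#       --checkpoint_path flava.pt --codebook_path dalle.pt \
#       --pytorch_dump_folder_path ./flava-hf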
| 719 |
import math
import sys
import cv2
import numpy as np
def vec_gaussian( img : np.ndarray , variance : float ) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice( img : np.ndarray , x : int , y : int , kernel_size : int ) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size : int , spatial_variance : float ) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter( img : np.ndarray , spatial_variance : float , intensity_variance : float , kernel_size : int , ) -> np.ndarray:
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i][j] = val
    return imga
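# Editor's note on the loop above: for each pixel p it computes
#   BF[p] = sum_q G_spatial(||p - q||) * G_intensity(|I[q] - I[p]|) * I[q] / W_p
# where W_p is the sum of the combined weights, i.e. a Gaussian blur whose
# weights are damped wherever a neighbour's intensity differs from I[p].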
def parse_args( args : list ) -> tuple:
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size += abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)

    img = cv2.imread(filename, 0)
    cv2.imshow("""input image""", img)

    out = img / 255
    out = out.astype("""float32""")

    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)

    out = out * 255
    out = np.uint8(out)
    cv2.imshow("""output image""", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 688 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "tf_padding" ) )
        self.parent.assertTrue(hasattr(config , "depth_multiplier" ) )
class MobileNetVaModelTester:
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , image_size=3_2 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=3_2 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1_2_8_0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=1_0 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )

    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def snake_case_ ( self : List[Any] ):
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def snake_case_ ( self : Optional[Any] ):
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def snake_case_ ( self : List[Any] ):
pass
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class(A )
_UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Any = [*signature.parameters.keys()]
_UpperCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case_ ( self : Tuple ):
def check_hidden_states_output(A : int , A : Optional[Any] , A : Optional[int] ):
_UpperCAmelCase : Optional[int] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**self._prepare_for_class(A , A ) )
_UpperCAmelCase : str = outputs.hidden_states
_UpperCAmelCase : Tuple = 1_6
self.assertEqual(len(A ) , A )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Tuple = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(A , A , A )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Dict = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(A )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Any = prepare_img()
_UpperCAmelCase : List[Any] = image_processor(images=A , return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
_UpperCAmelCase : int = model(**A )
# verify the logits
_UpperCAmelCase : Optional[int] = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , A )
_UpperCAmelCase : Any = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
@slow
def snake_case_ ( self : Any ):
_UpperCAmelCase : List[Any] = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_UpperCAmelCase : int = model.to(A )
_UpperCAmelCase : int = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Dict = image_processor(images=A , return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
_UpperCAmelCase : int = model(**A )
_UpperCAmelCase : List[str] = outputs.logits
# verify the logits
_UpperCAmelCase : Optional[int] = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , A )
_UpperCAmelCase : Optional[int] = torch.tensor(
[
[[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
[[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
[[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1e-4 ) )
| 289 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list ):
    def __lt__( self , other ):
        return self[-1] < other[-1]

    def __eq__( self , other ):
        return self[-1] == other[-1]
def patience_sort( collection : list ) -> list:
    '''simple docstring'''
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element] )
        i = bisect_left(stacks , new_stacks )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stacks )

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
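# Editor's note (illustrative): patience_sort([4, 1, 3, 2]) yields [1, 2, 3, 4].
# Each element costs one binary search over the stack tops, and the final
# heap-based merge is near-linear, giving O(n log n) overall.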
| 289 | 1 |
'''simple docstring'''
def print_max_activities( start : list , finish : list ) -> None:
    """simple docstring"""
    n = len(finish )
    print("""The following activities are selected:""" )

    # The first activity is always selected
    i = 0
    print(i , end=""",""" )

    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=""",""" )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
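# Editor's trace for the data above: activity 0 (1, 2) is taken first; the
# later activities with start >= previous finish are 1 (3, 4), 3 (5, 7) and
# 4 (8, 9), so the printed selection is "0,1,3,4,".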
| 713 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
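# Editor's usage sketch (48 kHz sample rate assumed; `process` is the
# per-sample entry point of the accompanying audio_filters.iir_filter module):
#   filt = make_lowpass(1_000, 48_000)
#   processed = [filt.process(sample) for sample in samples]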
| 160 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_snake_case : Tuple = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_snake_case : Optional[int] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_snake_case : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
_snake_case : Dict = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
_snake_case : Dict = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
_snake_case : int = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
_snake_case : Any = tf.keras.preprocessing.image.img_to_array(test_image)
_snake_case : int = np.expand_dims(test_image, axis=0)
_snake_case : Optional[Any] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_snake_case : str = "Normal"
if result[0][0] == 1:
_snake_case : str = "Abnormality detected"
| 441 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_snake_case : Tuple = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_snake_case : Optional[int] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_snake_case : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
_snake_case : Dict = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
_snake_case : Dict = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
_snake_case : int = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
_snake_case : Any = tf.keras.preprocessing.image.img_to_array(test_image)
_snake_case : int = np.expand_dims(test_image, axis=0)
_snake_case : Optional[Any] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_snake_case : str = "Normal"
if result[0][0] == 1:
_snake_case : str = "Abnormality detected"
| 441 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Dict = {}
class LlamaConfig(PretrainedConfig ):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=3_20_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
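# Editor's usage sketch (hypothetical values): RoPE scaling stretches the
# usable context window by `factor`, e.g.
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})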
| 709 |
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits :int , classical_bits :int ):
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
_lowercase : int = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
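# Since X flips both qubits from |0> before measurement, all shots land in
# state '11'; the print above should report something like {'11': 1000}.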
| 397 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
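# Usage sketch (added; needs an interactive terminal -- `run` blocks until a
# choice is confirmed with enter):
#   menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
#   chosen_index = menu.run(default_choice=0)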
| 80 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
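# Launch sketch (added; file name illustrative):
#   accelerate launch memory.py --mixed_precision fp16
# On a CUDA out-of-memory error, `find_executable_batch_size` frees memory,
# halves `batch_size`, and re-runs `inner_training_loop` until a size fits.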
| 682 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
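# These tests are collected by pytest as usual; something like
# `python -m pytest tests/test_dataset_list.py -q` runs them (path illustrative).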
| 719 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
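# Usage sketch (added; downloads the checkpoint named above on first call):
#   summarizer = TextSummarizationTool()
#   print(summarizer("<a long English text to condense>"))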
| 504 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
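# Usage sketch (added; model name is only an example -- any masked-LM works):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2)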
| 531 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
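# Example lookup (added note): _EXTENSION_TO_MODULE[".tsv"] resolves to
# ("csv", {"sep": "\t"}), so .tsv files are read by the csv builder with a tab
# separator; _MODULE_TO_EXTENSIONS inverts this table for file filtering.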
| 506 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
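# Invocation sketch (added; script and flags illustrative). The launched script
# must expose an `_mp_fn(index)` entry point for xmp.spawn:
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...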
| 677 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
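# Worked example (added for illustration; not part of the original file): the
# MST of this 4-node graph keeps edges (2-3), (0-3) and (0-1), total weight 19.
def _demo() -> None:
    g = Graph(4)
    for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
        g.add_edge(u, v, w)
    g.boruvka()  # prints each added edge, then the total weight: 19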
| 677 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
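# Invocation sketch (added; flags illustrative -- see register_subcommand above):
#   transformers-cli serve --task fill-mask --model distilroberta-base --port 8888
# The server then exposes GET / plus POST /tokenize, /detokenize and /forward.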
| 23 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 23 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
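# Note (added): with the lazy module swapped into sys.modules above, importing
# transformers.models.xlnet stays cheap; the torch/TF submodules listed in
# _import_structure are only loaded on first attribute access.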
| 667 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 667 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[int] , _snake_case : List[str] ):
"""simple docstring"""
A__ = params
A__ = np.array(_snake_case )
A__ = np.array([len(_snake_case ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : str , _snake_case : str ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self : Union[str, Any] ):
"""simple docstring"""
return len(self.lengths )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _a ( self : str ):
"""simple docstring"""
A__ = self.params.max_model_input_size
A__ = self.lengths > max_len
logger.info(F'''Splitting {sum(_snake_case )} too long sequences.''' )
def divide_chunks(_snake_case : List[Any] , _snake_case : Optional[int] ):
return [l[i : i + n] for i in range(0 , len(_snake_case ) , _snake_case )]
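# e.g. divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]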
A__ = []
A__ = []
if self.params.mlm:
A__ , A__ = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
A__ , A__ = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
A__ = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
A__ = np.insert(_snake_case , 0 , _snake_case )
if sub_s[-1] != sep_id:
A__ = np.insert(_snake_case , len(_snake_case ) , _snake_case )
assert len(_snake_case ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_snake_case )
new_tok_ids.extend(_snake_case )
new_lengths.extend([len(_snake_case ) for l in sub_seqs] )
A__ = np.array(_snake_case )
A__ = np.array(_snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = len(self )
A__ = self.lengths > 11
A__ = self.token_ids[indices]
A__ = self.lengths[indices]
A__ = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
A__ = self.params.special_tok_ids['unk_token']
A__ = len(self )
A__ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
A__ = (unk_occs / self.lengths) < 0.5
A__ = self.token_ids[indices]
A__ = self.lengths[indices]
A__ = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _a ( self : str ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _a ( self : Optional[int] , _snake_case : List[str] ):
"""simple docstring"""
A__ = [t[0] for t in batch]
A__ = [t[1] for t in batch]
assert len(_snake_case ) == len(_snake_case )
# Max for paddings
A__ = max(_snake_case )
# Pad token ids
if self.params.mlm:
A__ = self.params.special_tok_ids['pad_token']
else:
A__ = self.params.special_tok_ids['unk_token']
A__ = [list(t.astype(_snake_case ) ) + [pad_idx] * (max_seq_len_ - len(_snake_case )) for t in token_ids]
assert len(tk_ ) == len(_snake_case )
assert all(len(_snake_case ) == max_seq_len_ for t in tk_ )
A__ = torch.tensor(tk_ ) # (bs, max_seq_len_)
A__ = torch.tensor(_snake_case ) # (bs)
return tk_t, lg_t
| 9 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Union[str, Any] ="xlm"
lowerCamelCase__ : Any ={
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self , lowerCamelCase=30145 , lowerCamelCase=2048 , lowerCamelCase=12 , lowerCamelCase=16 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=1 , lowerCamelCase=True , lowerCamelCase=512 , lowerCamelCase=2048**-0.5 , lowerCamelCase=1e-12 , lowerCamelCase=0.0_2 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=5 , lowerCamelCase=True , lowerCamelCase="first" , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=5 , lowerCamelCase=5 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=0 , **lowerCamelCase , ) -> Dict:
"""simple docstring"""
__magic_name__ : List[Any] = vocab_size
__magic_name__ : str = emb_dim
__magic_name__ : Union[str, Any] = n_layers
__magic_name__ : Optional[Any] = n_heads
__magic_name__ : Dict = dropout
__magic_name__ : List[str] = attention_dropout
__magic_name__ : Optional[Any] = gelu_activation
__magic_name__ : Any = sinusoidal_embeddings
__magic_name__ : List[Any] = causal
__magic_name__ : Optional[Any] = asm
__magic_name__ : Tuple = n_langs
__magic_name__ : Union[str, Any] = use_lang_emb
__magic_name__ : str = layer_norm_eps
__magic_name__ : int = bos_index
__magic_name__ : int = eos_index
__magic_name__ : Any = pad_index
__magic_name__ : int = unk_index
__magic_name__ : Tuple = mask_index
__magic_name__ : int = is_encoder
__magic_name__ : Any = max_position_embeddings
__magic_name__ : List[Any] = embed_init_std
__magic_name__ : int = init_std
__magic_name__ : Optional[Any] = summary_type
__magic_name__ : List[str] = summary_use_proj
__magic_name__ : Optional[Any] = summary_activation
__magic_name__ : Union[str, Any] = summary_proj_to_labels
__magic_name__ : int = summary_first_dropout
__magic_name__ : Dict = start_n_top
__magic_name__ : int = end_n_top
__magic_name__ : Optional[int] = mask_token_id
__magic_name__ : Dict = lang_id
if "n_words" in kwargs:
__magic_name__ : str = kwargs['''n_words''']
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , **lowerCamelCase )
class A__ ( __SCREAMING_SNAKE_CASE ):
@property
def lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__magic_name__ : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__magic_name__ : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
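# Illustrative usage (a sketch assuming the upstream transformers names
# XLMConfig / XLMOnnxConfig for the two classes above; not part of this file):
#
#   config = XLMConfig(vocab_size=30145, emb_dim=2048)
#   onnx_config = XLMOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes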
| 154 | 0 |
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
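# Isolate the fractional part of ``number``; when ``digit_amount`` > 0 the
# result is rounded to that many decimal places.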
if digit_amount > 0:
return round(number - int(lowerCamelCase_ ) , lowerCamelCase_ )
return number - int(lowerCamelCase_ )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 716 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
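# Illustrative matches (comments only; the pattern names below are the ones this
# script uses later for the compiled expressions above):
#   _re_indent.search("    foo").groups()[0]                      -> "    "
#   _re_direct_key.search('"bert": ["BertModel"],').groups()[0]   -> "bert"
#   _re_bracket_content.search('["a", "b"]').groups()[0]          -> '"a", "b"'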
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
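# e.g. ignore_underscore(str)("_Foo_Bar") -> "foobar" (a case- and underscore-insensitive sort key)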
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return lowerCamelCase_
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
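# e.g. sort_objects(["foo", "Bar", "BAZ"]) -> ["BAZ", "Bar", "foo"]
# (constants first, then classes, then functions, each sorted alphabetically)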
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : lowerCamelCase_[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : lowerCamelCase_[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : torch.FloatTensor
__UpperCAmelCase : torch.FloatTensor
class A ( UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase : int = 1
@register_to_config
def __init__(self : Dict , __UpperCAmelCase : int = 2_0_0_0 , __UpperCAmelCase : float = 0.15 , __UpperCAmelCase : float = 0.01 , __UpperCAmelCase : float = 1348.0 , __UpperCAmelCase : float = 1E-5 , __UpperCAmelCase : int = 1 , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = sigma_max
# setable values
UpperCAmelCase__ = None
self.set_sigmas(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : Any , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : float = None , __UpperCAmelCase : Union[str, torch.device] = None ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCAmelCase__ = torch.linspace(1 , __UpperCAmelCase , __UpperCAmelCase , device=__UpperCAmelCase )
def lowercase_ (self : Any , __UpperCAmelCase : int , __UpperCAmelCase : float = None , __UpperCAmelCase : float = None , __UpperCAmelCase : float = None ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCAmelCase__ = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCAmelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCAmelCase__ = torch.exp(torch.linspace(math.log(__UpperCAmelCase ) , math.log(__UpperCAmelCase ) , __UpperCAmelCase ) )
UpperCAmelCase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase_ (self : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowercase_ (self : Tuple , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : bool = True , ) -> Union[SdeVeOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
UpperCAmelCase__ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCAmelCase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
UpperCAmelCase__ = timesteps.to(self.discrete_sigmas.device )
UpperCAmelCase__ = self.discrete_sigmas[timesteps].to(sample.device )
UpperCAmelCase__ = self.get_adjacent_sigma(__UpperCAmelCase , __UpperCAmelCase ).to(sample.device )
UpperCAmelCase__ = torch.zeros_like(__UpperCAmelCase )
UpperCAmelCase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCAmelCase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
UpperCAmelCase__ = diffusion.unsqueeze(-1 )
UpperCAmelCase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCAmelCase__ = randn_tensor(
sample.shape , layout=sample.layout , generator=__UpperCAmelCase , device=sample.device , dtype=sample.dtype )
UpperCAmelCase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCAmelCase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__UpperCAmelCase , prev_sample_mean=__UpperCAmelCase )
def lowercase_ (self : str , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
UpperCAmelCase__ = randn_tensor(sample.shape , layout=sample.layout , generator=__UpperCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
UpperCAmelCase__ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
UpperCAmelCase__ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
UpperCAmelCase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCAmelCase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCAmelCase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
UpperCAmelCase__ = step_size.unsqueeze(-1 )
UpperCAmelCase__ = sample + step_size * model_output
UpperCAmelCase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__UpperCAmelCase )
def lowercase_ (self : List[str] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCAmelCase__ = timesteps.to(original_samples.device )
UpperCAmelCase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
UpperCAmelCase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__UpperCAmelCase ) * sigmas[:, None, None, None]
)
UpperCAmelCase__ = noise + original_samples
return noisy_samples
def __len__(self : Tuple ) -> int:
"""simple docstring"""
return self.config.num_train_timesteps
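# Minimal sampling sketch (comments only; assumes the de-obfuscated diffusers
# names ScoreSdeVeScheduler / step_pred / step_correct for the class above):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   scheduler.set_sigmas(num_inference_steps=1000)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = model(sample, t)
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = model(sample, t)
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample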
| 486 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : int = DownBlockaD # noqa F405
__UpperCAmelCase : int = 'down'
def lowercase_ (self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ResnetDownsampleBlockaD # noqa F405
__UpperCAmelCase : str = 'down'
def lowercase_ (self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Any = AttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
def lowercase_ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : str = CrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'down'
def lowercase_ (self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ = 3_2
return init_dict, inputs_dict
def lowercase_ (self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Optional[Any] = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'down'
@property
def lowercase_ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase )
def lowercase_ (self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def lowercase_ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : List[str] = SkipDownBlockaD # noqa F405
__UpperCAmelCase : str = 'down'
@property
def lowercase_ (self : Optional[Any] ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Dict = AttnSkipDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
@property
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=__UpperCAmelCase )
def lowercase_ (self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : str = DownEncoderBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'down'
@property
def lowercase_ (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def lowercase_ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = {
"in_channels": 3_2,
"out_channels": 3_2,
}
UpperCAmelCase__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ (self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'down'
@property
def lowercase_ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = {
"in_channels": 3_2,
"out_channels": 3_2,
}
UpperCAmelCase__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ (self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : int = UNetMidBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'mid'
def lowercase_ (self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = {
"in_channels": 3_2,
"temb_channels": 1_2_8,
}
UpperCAmelCase__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Dict = UNetMidBlockaDCrossAttn # noqa F405
__UpperCAmelCase : List[str] = 'mid'
def lowercase_ (self : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ = 3_2
return init_dict, inputs_dict
def lowercase_ (self : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Optional[Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCAmelCase : List[Any] = 'mid'
@property
def lowercase_ (self : str ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase )
def lowercase_ (self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ = 3_2
return init_dict, inputs_dict
def lowercase_ (self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : int = UpBlockaD # noqa F405
__UpperCAmelCase : Dict = 'up'
@property
def lowercase_ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def lowercase_ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ResnetUpsampleBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def lowercase_ (self : Any ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def lowercase_ (self : str ) -> str:
"""simple docstring"""
UpperCAmelCase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Optional[int] = CrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def lowercase_ (self : Any ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ = 3_2
return init_dict, inputs_dict
def lowercase_ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Tuple = SimpleCrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : str = 'up'
@property
def lowercase_ (self : Tuple ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase , include_encoder_hidden_states=__UpperCAmelCase )
def lowercase_ (self : int ) -> Any:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ = 3_2
return init_dict, inputs_dict
def lowercase_ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : str = AttnUpBlockaD # noqa F405
__UpperCAmelCase : Dict = 'up'
@property
def lowercase_ (self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def lowercase_ (self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = SkipUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def lowercase_ (self : List[Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def lowercase_ (self : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'up'
@property
def lowercase_ (self : Any ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def lowercase_ (self : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Dict = UpDecoderBlockaD # noqa F405
__UpperCAmelCase : Tuple = 'up'
@property
def lowercase_ (self : int ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def lowercase_ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = {"in_channels": 3_2, "out_channels": 3_2}
UpperCAmelCase__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ (self : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(__UpperCAmelCase )
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Dict = AttnUpDecoderBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'up'
@property
def lowercase_ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def lowercase_ (self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = {"in_channels": 3_2, "out_channels": 3_2}
UpperCAmelCase__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ (self : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(__UpperCAmelCase )
| 486 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase__ = SpeechTaTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self :str ):
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaTokenizer(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken('''<mask>''' , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :List[str] ):
__SCREAMING_SNAKE_CASE : Tuple = '''this is a test'''
__SCREAMING_SNAKE_CASE : Dict = '''this is a test'''
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self :List[Any] , _lowerCamelCase :int , _lowerCamelCase :List[Any]=False , _lowerCamelCase :List[str]=2_0 , _lowerCamelCase :Any=5 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.get_input_output_texts(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = tokenizer.decode(_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
return text, ids
def SCREAMING_SNAKE_CASE_ ( self :str ):
__SCREAMING_SNAKE_CASE : Tuple = '''<pad>'''
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
__SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(_lowerCamelCase ) , 8_1 )
def SCREAMING_SNAKE_CASE_ ( self :Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 7_9 )
def SCREAMING_SNAKE_CASE_ ( self :str ):
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.vocab_size
__SCREAMING_SNAKE_CASE : str = len(_lowerCamelCase )
self.assertNotEqual(_lowerCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__SCREAMING_SNAKE_CASE : Optional[int] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.add_tokens(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = len(_lowerCamelCase )
self.assertNotEqual(_lowerCamelCase , 0 )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , len(_lowerCamelCase ) )
self.assertEqual(_lowerCamelCase , all_size + len(_lowerCamelCase ) )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_lowerCamelCase )
self.assertGreaterEqual(len(_lowerCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__SCREAMING_SNAKE_CASE : List[Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__SCREAMING_SNAKE_CASE : str = tokenizer.add_special_tokens(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.vocab_size
__SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowerCamelCase )
self.assertNotEqual(_lowerCamelCase , 0 )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , len(_lowerCamelCase ) )
self.assertEqual(_lowerCamelCase , all_size_a + len(_lowerCamelCase ) )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_lowerCamelCase )
self.assertGreaterEqual(len(_lowerCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
pass
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
pass
def SCREAMING_SNAKE_CASE_ ( self :str ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : str = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_lowerCamelCase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
# fmt: off
self.assertListEqual(_lowerCamelCase , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
# fmt: on
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
# Use custom sequence because this tokenizer does not handle numbers.
__SCREAMING_SNAKE_CASE : Tuple = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
__SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_lowerCamelCase , )
| 401 |
"""simple docstring"""
_lowerCamelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 401 | 1 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class A ( _a ,unittest.TestCase ):
lowercase_ = XLMProphetNetTokenizer
lowercase_ = False
lowercase_ = True
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_a = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> List[str]:
"""simple docstring"""
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> Dict:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(vocab_keys ) , 10_12 )
    def test_vocab_size( self ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
    def test_full_tokenizer( self ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
    def big_tokenizer( self ) -> List[Any]:
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
    def test_tokenization_base_easy_symbols( self ) -> Union[str, Any]:
"""simple docstring"""
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [3_53_89, 66_72, 49, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
_a = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 22 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)
    def get_scheduler_config( self, **kwargs ):
        config = {"num_train_timesteps": 10_00}
        config.update(**kwargs )
return config
    def check_over_configs( self, time_step=0, **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps", None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self, time_step=0, **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps", None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self, **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
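        # second pass over the same timesteps: IPNDM is a multistep method that keeps
        # a history of past model outputs (scheduler.ets), so this exercises the
        # fully warmed-up state rather than a fresh one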
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps", None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs ).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs ).prev_sample
            self.assertEqual(output_0.shape, sample.shape )
            self.assertEqual(output_0.shape, output_1.shape )
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs ).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs ).prev_sample
            self.assertEqual(output_0.shape, sample.shape )
            self.assertEqual(output_0.shape, output_1.shape )
    def test_timesteps( self ):
        for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=t )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 693 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__a = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
__a = {
"camembert-base": 5_1_2,
}
__a = "▁"
class CamembertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
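    # single sequence: <s> X </s> ; pair of sequences: <s> A </s></s> B </s>
    # (CamemBERT uses the same special-token format as RoBERTa)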
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
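    # CamemBERT, like RoBERTa, does not use token type ids, so the mask below is all zeros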
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 707 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , patch_size=2 , max_length=2_4 , num_mel_bins=1_6 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
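        # with the defaults above: frequency_out = (16 - 2) // 2 + 1 = 8,
        # time_out = (24 - 2) // 2 + 1 = 12, num_patches = 8 * 12 = 96, seq_length = 98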
    def prepare_config_and_inputs( self ):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
    def setUp( self ):
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_feature_extractor( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
    def test_inference_audio_classification( self ):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device )
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
            outputs = model(**inputs )
# verify the logits
        expected_shape = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 409 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_2( self ):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
    def test_stable_diffusion_karras_sigmas( self ):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
            [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 93 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig( PretrainedConfig ):
    model_type = '''convbert'''
    def __init__(self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_6_8 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig( OnnxConfig ):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
| 132 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
"""simple docstring"""
from __future__ import annotations
def allocation_num( number_of_bytes : int , partitions : int ) -> list[str]:
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not > number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
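# example: allocation_num(16647, 4) -> ['1-4161', '4162-8322', '8323-12483', '12484-16647']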
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n ):
    '''simple docstring'''
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(" " , end="" )
        for _ in range(0 , i + 1 ):  # printing stars
            print("* " , end="" )
        print()
def reverse_floyd(n ):
    '''simple docstring'''
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print("* " , end="" )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(" " , end="" )
def pretty_print(n ):
    '''simple docstring'''
    if n <= 0:
        print(" ... .... nothing printing :(" )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
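# e.g. pretty_print(3) prints rows of 1, 2 and 3 stars, then 3, 2 and 1 stars (a diamond)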
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 111 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCamelCase__ : Dict = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _a (BaseImageProcessor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 2_55 , offset = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["""shortest_edge"""] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["""height"""], size["""width"""])
        else:
            raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , offset = True , data_format = None , **kwargs , ):
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        if offset and not do_rescale:
            raise ValueError("""For offset, do_rescale must also be set to True.""" )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        if not valid_images(videos ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"""pixel_values""": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
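        # Illustrative usage (names assumed, not part of this file):
        #   pixel_values = image_processor(videos, return_tensors="pt")["pixel_values"]
        #   # shape: (batch_size, num_frames, num_channels, crop_size["height"], crop_size["width"])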
| 721 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase__ : str = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus ):
"""simple docstring"""
def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , )
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=A__ )
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Any:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> str:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> List[str]:
self.run_seqaseq_quick(
distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ )
@require_apex
@require_torch_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def UpperCamelCase ( self , A__ ) -> List[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
_SCREAMING_SNAKE_CASE = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
_SCREAMING_SNAKE_CASE = experiments[experiment_id]
_SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
_SCREAMING_SNAKE_CASE = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] )
_SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) )
self.assertEqual(A__ , data["""n_matches"""] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
# test if do_predict saves generations and metrics
_SCREAMING_SNAKE_CASE = os.listdir(A__ )
_SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCamelCase ( self ) -> Dict:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A__ ) -> Tuple[int, float]:
_SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0"""
_SCREAMING_SNAKE_CASE = self.run_trainer(
max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig
_SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
_SCREAMING_SNAKE_CASE = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict:
_SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
_SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split()
_SCREAMING_SNAKE_CASE = """
--do_predict
""".split()
_SCREAMING_SNAKE_CASE = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_SCREAMING_SNAKE_CASE = get_gpu_count()
_SCREAMING_SNAKE_CASE = get_torch_dist_unique_port()
_SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A__ , env=self.get_env() )
else:
_SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args
with patch.object(A__ , """argv""" , A__ ):
main()
return output_dir
| 0 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian( x , mu : float = 0.0 , sigma : float = 1.0 ):
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
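# example: gaussian(0) == 1 / sqrt(2 * pi) ~= 0.3989, the standard normal density at 0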
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 44 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_UpperCAmelCase : List[Any] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
_UpperCAmelCase : Union[str, Any] = {
"camembert-base": 512,
}
_UpperCAmelCase : Dict = "▁"
class CamembertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids )
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 668 | 0 |
"""simple docstring"""
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
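

# Hedged usage sketch (my addition): the functions above assume a minimal
# singly linked node exposing `val` and `next`, e.g.:
class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


_head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
assert is_palindrome_stack(_head) and is_palindrome_dict(_head)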
| 529 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
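
# Hedged illustration (my addition, not part of the BioGPT file): the lazy-module
# trick above defers heavy imports until an attribute is first accessed. A tiny
# self-contained version of the same idea:
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value


_lazy_demo = _TinyLazyModule("demo", {"sqrt": "math"})
assert _lazy_demo.sqrt(9) == 3.0  # `math` is only imported on first access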
| 529 | 1 |
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indentation prefix of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at the given `indent_level`, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so that sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects` following isort rules: constants, then classes, then functions."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
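

# Hedged demo (my addition): constants sort first, classes second, functions
# last, each group alphabetically with underscores ignored.
assert sort_objects(["zebra_util", "Alpha", "BETA", "gamma"]) == ["BETA", "Alpha", "gamma", "zebra_util"]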
def sort_objects_in_import(import_statement):
    """Sorts the imports in a single import statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
__lowerCamelCase : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 385 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 385 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
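

# Hedged illustration (my addition): the heart of ROUGE-2 is bigram-overlap F1.
# This set-based version is a simplification; the real metric uses counts and
# stemming via the `rouge_score` package.
def _bigrams(text: str):
    toks = text.split()
    return {(toks[i], toks[i + 1]) for i in range(len(toks) - 1)}


def rough_rouge2_f1(pred: str, ref: str) -> float:
    p, r = _bigrams(pred), _bigrams(ref)
    overlap = len(p & r)
    if overlap == 0:
        return 0.0
    precision, recall = overlap / len(p), overlap / len(r)
    return 2 * precision * recall / (precision + recall)


assert rough_rouge2_f1("the cat sat on the mat", "the cat sat on the mat") == 1.0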
| 534 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
snake_case_ : int = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
snake_case_ : Optional[int] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
snake_case_ : List[str] = old_model.bias
logger.info(f"""{attribute} is initialized""" )
snake_case_ : int = True
break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
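

# Hedged sketch (my addition, separate from the conversion logic above): how a
# fused attention in_proj of shape (3 * embed_dim, embed_dim) splits into
# query/key/value, mirroring the slicing done in the loop.
import torch

_embed_dim = 4
_fused = torch.randn(3 * _embed_dim, _embed_dim)
_q, _k, _v = _fused[:_embed_dim], _fused[_embed_dim : 2 * _embed_dim], _fused[2 * _embed_dim :]
assert _q.shape == _k.shape == _v.shape == (_embed_dim, _embed_dim)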
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 534 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass

    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
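

# Hedged sketch (my addition): at its core, patch_submodule swaps an attribute
# and restores it on exit. A minimal stand-alone version of the idea (the real
# helper also handles dotted paths and module wrapping):
import contextlib


@contextlib.contextmanager
def _tiny_patch(obj, name, new):
    old = getattr(obj, name)
    setattr(obj, name, new)
    try:
        yield
    finally:
        setattr(obj, name, old)


class _Demo:
    value = 1


with _tiny_patch(_Demo, "value", 2):
    assert _Demo.value == 2
assert _Demo.value == 1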
| 41 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
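
# Hedged sanity check (my addition): the table above resolves ONNX Runtime
# dtype strings to numpy dtypes, e.g. "tensor(float)" is 32-bit.
assert ORT_TO_NP_TYPE["tensor(float)"] is np.float32
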
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls, model_id: Union[str, Path], use_auth_token=None, revision=None, force_download: bool = False,
        cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None,
        sess_options=None, **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)

        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(
        cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None, **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download,
            use_auth_token=use_auth_token, **model_kwargs,
        )
| 345 | 0 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 154 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers: 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
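

# Quick check (my addition): the first Fibonacci number with three digits is
# F(12) = 144, so solution(3) should be 12.
assert solution(3) == 12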
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 154 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75,
            guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="np",
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 517 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 517 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    """A FIFO queue implemented with two LIFO stacks."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Cache method look-ups for the transfer loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()
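

# Usage sketch (my addition): FIFO order is preserved across the two stacks.
_q = QueueByTwoStacks([1, 2, 3])
_q.put(4)
assert _q.get() == 1 and _q.get() == 2 and len(_q) == 2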
if __name__ == "__main__":
from doctest import testmod
testmod()
| 411 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 411 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCamelCase__ : int = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Tuple , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Optional[int] ) -> Tuple:
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :int=None ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : Any = {}
if prompt is not None:
SCREAMING_SNAKE_CASE : List[str] = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE : Dict = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE : int = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
SCREAMING_SNAKE_CASE : Optional[int] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self :Optional[Any] , lowerCamelCase_ :Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ :str ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
f"Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. "
'''Note also that one single text can be provided for conditional image to text generation.''' )
SCREAMING_SNAKE_CASE : int = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids
SCREAMING_SNAKE_CASE : Tuple = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE : Tuple = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : str = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(f"Model type {model_type} does not support conditional text generation" )
else:
SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE : Dict = None
return model_inputs
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str]=None ) -> List[str]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , lowerCamelCase_ )
and all(x is None for x in model_inputs['''input_ids'''] )
):
SCREAMING_SNAKE_CASE : str = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE : Dict = model_inputs.pop(self.model.main_input_name )
SCREAMING_SNAKE_CASE : List[Any] = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ )
return model_outputs
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE : List[str] = {
'''generated_text''': self.tokenizer.decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , )
}
records.append(lowerCamelCase_ )
return records
| 698 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of `fnc` between `x_start` and `x_end` using `steps` chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
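

# Sanity check (my addition): the arc length of y = x from 0 to 1 is sqrt(2),
# and the chord approximation is exact for a straight line.
assert abs(line_length(lambda x: x, 0, 1) - math.sqrt(2)) < 1e-9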
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 698 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert("""RGB""" )
snake_case__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
snake_case__ = transform(__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
return image
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
if "visual_encoder" in key:
snake_case__ = re.sub("""visual_encoder*""" , """vision_model.encoder""" , __lowerCAmelCase )
if "blocks" in key:
snake_case__ = re.sub(R"""blocks""" , """layers""" , __lowerCAmelCase )
if "attn" in key:
snake_case__ = re.sub(R"""attn""" , """self_attn""" , __lowerCAmelCase )
if "norm1" in key:
snake_case__ = re.sub(R"""norm1""" , """layer_norm1""" , __lowerCAmelCase )
if "norm2" in key:
snake_case__ = re.sub(R"""norm2""" , """layer_norm2""" , __lowerCAmelCase )
if "encoder.norm" in key:
snake_case__ = re.sub(R"""encoder.norm""" , """post_layernorm""" , __lowerCAmelCase )
if "encoder.patch_embed.proj" in key:
snake_case__ = re.sub(R"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , __lowerCAmelCase )
if "encoder.pos_embed" in key:
snake_case__ = re.sub(R"""encoder.pos_embed""" , """embeddings.position_embedding""" , __lowerCAmelCase )
if "encoder.cls_token" in key:
snake_case__ = re.sub(R"""encoder.cls_token""" , """embeddings.class_embedding""" , __lowerCAmelCase )
if "self_attn" in key:
snake_case__ = re.sub(R"""self_attn.proj""" , """self_attn.projection""" , __lowerCAmelCase )
return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 704 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
            ''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
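    # For example, passing "--use_linear_projection True" on the command line
    # yields the Python boolean True via the helper above.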
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 530 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Perform a topological sort on the global directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
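    # For the example graph above this prints ['c', 'd', 'e', 'b', 'a']: every
    # vertex appears only after all of its outgoing neighbours, i.e. a reversed
    # topological order with dependencies listed first.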
| 81 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
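# Minimal illustrative usage (not part of the original module):
# config = TransfoXLConfig(n_layer=12)
# assert config.num_hidden_layers == 12  # resolved through `attribute_map`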
| 565 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 565 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    '''simple docstring'''

    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 548 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    '''simple docstring'''

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
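# Illustrative example (uses the module-level constants defined below, with the
# default W1 = 1): for g_function = {(0, 0): 0} and goal = (19, 19),
# key((0, 0), 1, (19, 19), g_function) = 0 + 1 * manhattan distance = 38.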
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 548 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Tuple = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def A (__A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
UpperCAmelCase_ = k.replace(__A , __A )
if k.startswith('''encoder''' ):
UpperCAmelCase_ = k.replace('''.attn''' , '''.self_attn''' )
UpperCAmelCase_ = k.replace('''norm1''' , '''self_attn_layer_norm''' )
UpperCAmelCase_ = k.replace('''norm2''' , '''final_layer_norm''' )
elif k.startswith('''decoder''' ):
UpperCAmelCase_ = k.replace('''norm1''' , '''self_attn_layer_norm''' )
UpperCAmelCase_ = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
UpperCAmelCase_ = k.replace('''norm3''' , '''final_layer_norm''' )
return k
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = [
'''model.encoder.layernorm_embedding.weight''',
'''model.encoder.layernorm_embedding.bias''',
'''model.decoder.layernorm_embedding.weight''',
'''model.decoder.layernorm_embedding.bias''',
]
for k in keys:
UpperCAmelCase_ = sd.pop(__A )
UpperCAmelCase_ = k.replace('''layernorm_embedding''' , '''layer_norm''' )
assert new_k not in sd
UpperCAmelCase_ = v
snake_case_ : str = ["START"]
@torch.no_grad()
def A (__A : List[Any] , __A : Tuple , __A : Optional[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ = torch.load(__A , map_location='''cpu''' )
UpperCAmelCase_ = model['''model''']
UpperCAmelCase_ = BlenderbotConfig.from_json_file(__A )
UpperCAmelCase_ = BlenderbotForConditionalGeneration(__A )
UpperCAmelCase_ = m.model.state_dict().keys()
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
UpperCAmelCase_ = rename_state_dict_key(__A )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
UpperCAmelCase_ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__A )
m.model.load_state_dict(__A , strict=__A )
m.half()
m.save_pretrained(__A )
if __name__ == "__main__":
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
snake_case_ : List[str] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 169 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
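# For illustration (test path assumed):
# get_model("tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward")
# returns "bert", while any test outside tests/models/ returns None.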
def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 169 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 78 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1_024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Apply TensorFlow-style 'SAME' padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
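# Illustrative example of the padding above (shapes assumed): for a 224x224
# input and a 3x3 convolution with stride 2, the computed "SAME" padding is
# (0, 1, 0, 1), i.e. one extra pixel on the right and bottom, matching
# TensorFlow's asymmetric padding convention.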
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 585 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
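# Minimal illustrative usage (not part of the original module):
# config = Data2VecVisionConfig(image_size=384)
# onnx_config = Data2VecVisionOnnxConfig(config)
# assert "pixel_values" in onnx_config.inputs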
| 440 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
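# The forward pass mean-pools token embeddings under the attention mask before
# projecting into the image embedding space. Illustrative shapes (assumed batch
# of 2, sequence length 7, default dims): `embs` is (2, 7, 1024) and the
# projected output is (2, 768).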
| 440 | 1 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """simple docstring"""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
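# Illustrative usage via the top-level pipeline factory (model name assumed):
# from transformers import pipeline
# extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
# features = extractor("Transformers is great!")  # nested list of shape [1, seq_len, hidden_size]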
| 114 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # No text is detected in this image, so layoutlmv2 should return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
    def test_small_model_tf(self):
"""simple docstring"""
pass
| 417 | 0 |
def dodecahedron_surface_area(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Edge length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Edge length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
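# Quick sanity check (illustrative, not from the original file): for a unit edge,
# the surface area of a regular dodecahedron is ~20.6457 and the volume ~7.6631.
#
# print(dodecahedron_surface_area(1))  # ~20.6457
# print(dodecahedron_volume(1))  # ~7.6631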
| 704 |
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Iterator[str], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
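# Hypothetical usage sketch (not part of the original file): training the tokenizer
# from an in-memory corpus. The corpus and vocab size are assumptions for illustration.
#
# corpus = ["the quick brown fox", "jumps over the lazy dog"]
# sp_tokenizer = SentencePieceUnigramTokenizer()
# sp_tokenizer.train_from_iterator(iter(corpus), vocab_size=100)
# print(sp_tokenizer.encode("the fox").tokens)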
| 552 | 0 |
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
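# For the adjacency list above, the printed value is 5: the longest path visits
# five vertices, e.g. 0 -> 2 -> 5 -> 6 -> 7 (long_dist counts vertices, not edges).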
| 84 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : jnp.ndarray
_UpperCamelCase : jnp.ndarray
class A_ ( nn.Module ):
'''simple docstring'''
_UpperCamelCase : int
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
_UpperCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase = self.block_out_channels[i]
lowercase = self.block_out_channels[i + 1]
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = blocks
lowercase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case ):
lowercase = self.conv_in(snake_case )
lowercase = nn.silu(snake_case )
for block in self.blocks:
lowercase = block(snake_case )
lowercase = nn.silu(snake_case )
lowercase = self.conv_out(snake_case )
return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = 32
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1280
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.floataa
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : str = "rgb"
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase = jnp.ones((1,) , dtype=jnp.intaa )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase , lowercase = jax.random.split(snake_case )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.block_out_channels
lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase = self.num_attention_heads or self.attention_head_dim
# input
lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase = self.only_cross_attention
if isinstance(snake_case , snake_case ):
lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case , snake_case ):
lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase = []
lowercase = []
lowercase = block_out_channels[0]
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase = output_channel
lowercase = block_out_channels[i]
lowercase = i == len(snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase = FlaxCrossAttnDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase = FlaxDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case )
for _ in range(self.layers_per_block ):
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
if not is_final_block:
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
lowercase = down_blocks
lowercase = controlnet_down_blocks
# mid
lowercase = block_out_channels[-1]
lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase = jnp.flip(snake_case , axis=1 )
# 1. time
if not isinstance(snake_case , jnp.ndarray ):
lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase = timesteps.astype(dtype=jnp.floataa )
lowercase = jnp.expand_dims(snake_case , 0 )
lowercase = self.time_proj(snake_case )
lowercase = self.time_embedding(snake_case )
# 2. pre-process
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.conv_in(snake_case )
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.controlnet_cond_embedding(snake_case )
sample += controlnet_cond
# 3. down
lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case , snake_case ):
lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
else:
lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
        # 5. controlnet blocks
lowercase = ()
for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
lowercase = controlnet_block(snake_case )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase = controlnet_down_block_res_samples
lowercase = self.controlnet_mid_block(snake_case )
# 6. scaling
lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
| 84 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
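    # Illustrative note (not part of the original file): with the lazy module in place,
    # heavy submodules are only imported on first attribute access, e.g.:
    #
    #   from transformers.models.llama import LlamaConfig  # loads configuration_llama only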
| 562 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
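# Hypothetical usage sketch (not part of the original file). The checkpoint name is
# an assumption for illustration.
#
# from diffusers import DDIMPipeline
#
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint
# image = pipe(batch_size=1, num_inference_steps=50).images[0]
# image.save("sample.png")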
| 562 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512,
                 pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
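# Hypothetical usage sketch (not part of the original file). The checkpoint name and
# tensors are assumptions for illustration.
#
# config = RobertaSeriesConfig.from_pretrained("xlm-roberta-base", project_dim=512)
# model = RobertaSeriesModelWithTransformation(config)
# out = model(input_ids=input_ids, attention_mask=attention_mask)
# text_embeds = out.projection_state  # (batch, seq_len, project_dim)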
| 654 |
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
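# A smaller worked example: there are 4 ways to make 5 pence
# ({5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}), so solution(5) == 4.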
| 295 | 0 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
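# Explanatory note (illustrative, not from the original file): the table entry
# different_colour_ways_number[n][k] counts the ways to place one or more tiles of
# length k + 2 (k = 0, 1, 2, i.e. lengths 2, 3 and 4) in a row of length n, leaving
# the remaining positions uncovered; the answer sums the three per-length counts.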
if __name__ == "__main__":
print(f'{solution() = }')
| 712 |
from __future__ import annotations


def print_distance(distance: list[float], src: int):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
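    # Non-interactive example (illustrative, not part of the original file):
    #
    # edges = [
    #     {"src": 0, "dst": 1, "weight": 4},
    #     {"src": 0, "dst": 2, "weight": 1},
    #     {"src": 2, "dst": 1, "weight": 2},
    # ]
    # bellman_ford(edges, 3, 3, 0)  # -> [0.0, 3.0, 1.0]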
| 591 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCamelCase = TypeVar("T")
def A ( lowercase__ : int ) -> int:
return (position - 1) // 2
def A ( lowercase__ : int ) -> int:
return (2 * position) + 1
def A ( lowercase__ : int ) -> int:
return (2 * position) + 2
class lowerCAmelCase_ ( Generic[T] ):
"""simple docstring"""
def __init__( self :Any ):
UpperCamelCase__ :list[tuple[T, int]] = []
UpperCamelCase__ :dict[T, int] = {}
UpperCamelCase__ :int = 0
def __len__( self :Dict ):
return self.elements
def __repr__( self :Tuple ):
return str(self.heap )
def __a ( self :List[str] ):
# Check if the priority queue is empty
return self.elements == 0
def __a ( self :List[str] , lowerCamelCase__ :T , lowerCamelCase__ :int ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
UpperCamelCase__ :Dict = self.elements
self.elements += 1
self._bubble_up(lowerCamelCase__ )
def __a ( self :Tuple ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
UpperCamelCase__ , UpperCamelCase__ :int = self.heap[0]
self._bubble_down(lowerCamelCase__ )
return elem
def __a ( self :Union[str, Any] , lowerCamelCase__ :T , lowerCamelCase__ :int ):
# Update the weight of the given key
UpperCamelCase__ :Optional[Any] = self.position_map[elem]
UpperCamelCase__ :Dict = (elem, weight)
if position > 0:
UpperCamelCase__ :Optional[Any] = get_parent_position(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowerCamelCase__ )
else:
self._bubble_down(lowerCamelCase__ )
else:
self._bubble_down(lowerCamelCase__ )
def __a ( self :Optional[int] , lowerCamelCase__ :T ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
UpperCamelCase__ :int = self.position_map[elem]
if curr_pos == 0:
return None
UpperCamelCase__ :Any = get_parent_position(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :str = self.heap[curr_pos]
UpperCamelCase__ , UpperCamelCase__ :List[str] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ )
return self._bubble_up(lowerCamelCase__ )
return None
def __a ( self :Dict , lowerCamelCase__ :T ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
UpperCamelCase__ :Dict = self.position_map[elem]
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.heap[curr_pos]
UpperCamelCase__ :Optional[int] = get_child_left_position(lowerCamelCase__ )
UpperCamelCase__ :int = get_child_right_position(lowerCamelCase__ )
if child_left_position < self.elements and child_right_position < self.elements:
UpperCamelCase__ , UpperCamelCase__ :int = self.heap[child_left_position]
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ )
return self._bubble_down(lowerCamelCase__ )
if child_left_position < self.elements:
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ )
return self._bubble_down(lowerCamelCase__ )
else:
return None
if child_right_position < self.elements:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ )
return self._bubble_down(lowerCamelCase__ )
return None
def __a ( self :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :int ):
# Swap the nodes at the given positions
UpperCamelCase__ :Optional[int] = self.heap[nodea_pos][0]
UpperCamelCase__ :Optional[int] = self.heap[nodea_pos][0]
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
UpperCamelCase__ :Dict = nodea_pos
UpperCamelCase__ :Any = nodea_pos
class lowerCAmelCase_ ( Generic[T] ):
"""simple docstring"""
def __init__( self :str ):
UpperCamelCase__ :dict[T, dict[T, int]] = {}
UpperCamelCase__ :int = 0
def __repr__( self :int ):
return str(self.connections )
def __len__( self :Dict ):
return self.nodes
def __a ( self :List[Any] , lowerCamelCase__ :T ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
UpperCamelCase__ :Optional[int] = {}
self.nodes += 1
def __a ( self :Union[str, Any] , lowerCamelCase__ :T , lowerCamelCase__ :T , lowerCamelCase__ :int ):
# Add an edge between 2 nodes in the graph
self.add_node(lowerCamelCase__ )
self.add_node(lowerCamelCase__ )
UpperCamelCase__ :Tuple = weight
UpperCamelCase__ :int = weight
def A ( lowercase__ : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
UpperCamelCase__ :dict[T, int] = {node: maxsize for node in graph.connections}
UpperCamelCase__ :dict[T, T | None] = {node: None for node in graph.connections}
UpperCamelCase__ :MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowercase__ , lowercase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
UpperCamelCase__ :Optional[Any] = priority_queue.extract_min()
UpperCamelCase__ :List[str] = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase__ :str = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowercase__ , dist[neighbour] )
UpperCamelCase__ :int = node
# running prim's algorithm
while not priority_queue.is_empty():
UpperCamelCase__ :int = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase__ :int = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowercase__ , dist[neighbour] )
UpperCamelCase__ :List[Any] = node
return dist, parent | 45 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 361 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0,
                 depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3],
                 strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0,
                 hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5,
                 initializer_range=0.02, **kwargs):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 602 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 602 | 1 |
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node) -> None:
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 382 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True,
                 frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 382 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_lowercase : Union[str, Any] =get_logger(__name__)
def A__ ( lowercase: int, lowercase: str, lowercase: Union[str, Any], lowercase: Optional[int], lowercase: Optional[int]=0 ) -> Optional[int]:
os.makedirs(lowercase, exist_ok=lowercase )
with FSDP.state_dict_type(
lowercase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
A : List[Any] =model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A : Optional[int] =F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
A : Union[str, Any] =os.path.join(lowercase, lowercase )
if accelerator.process_index == 0:
logger.info(F'Saving model to {output_model_file}' )
torch.save(lowercase, lowercase )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A : Tuple =(
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A : Dict =os.path.join(lowercase, lowercase )
logger.info(F'Saving model to {output_model_file}' )
torch.save(lowercase, lowercase )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A : List[str] =os.path.join(lowercase, F'{MODEL_NAME}_{model_index}' )
os.makedirs(lowercase, exist_ok=lowercase )
logger.info(F'Saving model to {ckpt_dir}' )
A : Optional[int] ={'model': state_dict}
dist_cp.save_state_dict(
state_dict=lowercase, storage_writer=dist_cp.FileSystemWriter(lowercase ), planner=DefaultSavePlanner(), )
logger.info(F'Model saved to {ckpt_dir}' )
def A__ ( lowercase: Optional[int], lowercase: Dict, lowercase: Dict, lowercase: Optional[int], lowercase: Any=0 ) -> List[Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowercase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(lowercase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
A : List[str] =F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
A : Dict =os.path.join(lowercase, lowercase )
logger.info(F'Loading model from {input_model_file}' )
A : Optional[Any] =torch.load(lowercase )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A : Dict =(
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A : Optional[Any] =os.path.join(lowercase, lowercase )
logger.info(F'Loading model from {input_model_file}' )
A : int =torch.load(lowercase )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A : List[str] =(
os.path.join(lowercase, F'{MODEL_NAME}_{model_index}' )
if F'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading model from {ckpt_dir}' )
A : Optional[Any] ={'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=lowercase, storage_reader=dist_cp.FileSystemReader(lowercase ), planner=DefaultLoadPlanner(), )
A : Optional[Any] =state_dict['model']
logger.info(F'Model loaded from {ckpt_dir}' )
model.load_state_dict(lowercase )
def A__ ( lowercase: Any, lowercase: str, lowercase: str, lowercase: Optional[Any], lowercase: int, lowercase: List[str]=0 ) -> Optional[int]:
os.makedirs(lowercase, exist_ok=lowercase )
with FSDP.state_dict_type(
lowercase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
A : Union[str, Any] =FSDP.optim_state_dict(lowercase, lowercase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
A : int =(
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A : Union[str, Any] =os.path.join(lowercase, lowercase )
logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
torch.save(lowercase, lowercase )
logger.info(F'Optimizer state saved in {output_optimizer_file}' )
else:
A : int =os.path.join(lowercase, F'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(lowercase, exist_ok=lowercase )
logger.info(F'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state}, storage_writer=dist_cp.FileSystemWriter(lowercase ), planner=DefaultSavePlanner(), )
logger.info(F'Optimizer state saved in {ckpt_dir}' )
def A__ ( lowercase: Tuple, lowercase: int, lowercase: int, lowercase: Optional[int], lowercase: Union[str, Any], lowercase: Optional[Any]=0 ) -> int:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowercase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A : Any =None
            # The check below should work, but it currently does not (mostly a PyTorch issue);
            # in the meantime we disable it at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
A : Optional[Any] =(
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A : Union[str, Any] =os.path.join(lowercase, lowercase )
logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
A : Tuple =torch.load(lowercase )
logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
else:
A : Optional[Any] =(
os.path.join(lowercase, F'{OPTIMIZER_NAME}_{optimizer_index}' )
if F'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading Optimizer from {ckpt_dir}' )
A : Tuple =load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key='optimizer', storage_reader=dist_cp.FileSystemReader(lowercase ), )
A : Optional[Any] =optim_state['optimizer']
logger.info(F'Optimizer loaded from {ckpt_dir}' )
A : Tuple =FSDP.optim_state_dict_to_load(lowercase, lowercase, lowercase )
optimizer.load_state_dict(lowercase )
| 661 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
def train_command_factory(args: Namespace):
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__( self , args : Namespace ):
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'Loading dataset from {args.train_data}' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        raise NotImplementedError
    def run_tf( self ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
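# Hedged usage sketch: the command is normally reached via `transformers-cli
# train`, but it can also be driven programmatically with an argparse
# Namespace. The csv path below is illustrative.
#
#     from argparse import Namespace
#     args = Namespace(
#         train_data="train.csv", column_label=0, column_text=1, column_id=2,
#         skip_first_row=True, validation_data="", validation_split=0.1,
#         output="./", task="text_classification", model="bert-base-uncased",
#         train_batch_size=32, valid_batch_size=64, learning_rate=3e-5,
#         adam_epsilon=1e-08,
#     )
#     TrainCommand(args).run()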
| 661 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( SchedulerCommonTest ):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def __a ( self , **lowerCAmelCase__ ) -> int:
a : Optional[Any] = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**lowerCAmelCase__ )
return config
def __a ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> str:
a : int = dict(self.forward_default_kwargs )
a : Union[str, Any] = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
a : Any = self.dummy_sample
a : str = 0.1 * sample
a : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a : List[Any] = self.get_scheduler_config(**lowerCAmelCase__ )
a : List[str] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
a : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
a : Dict = scheduler_class.from_pretrained(lowerCAmelCase__ )
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
a : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
a, a : int = sample, sample
for t in range(lowerCAmelCase__ , time_step + scheduler.config.solver_order + 1 ):
a : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
a : List[str] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __a ( self ) -> str:
pass
def __a ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Dict:
a : int = dict(self.forward_default_kwargs )
a : Tuple = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
a : Optional[int] = self.dummy_sample
a : List[Any] = 0.1 * sample
a : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a : List[Any] = self.get_scheduler_config()
a : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
a : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
a : Dict = scheduler_class.from_pretrained(lowerCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
a : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
a : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
a : Optional[int] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __a ( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> int:
if scheduler is None:
a : List[str] = self.scheduler_classes[0]
a : List[str] = self.get_scheduler_config(**lowerCAmelCase__ )
a : str = scheduler_class(**lowerCAmelCase__ )
a : List[Any] = self.scheduler_classes[0]
a : int = self.get_scheduler_config(**lowerCAmelCase__ )
a : List[Any] = scheduler_class(**lowerCAmelCase__ )
a : List[Any] = 10
a : List[Any] = self.dummy_model()
a : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a : str = model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
return sample
def __a ( self ) -> Union[str, Any]:
a : Dict = dict(self.forward_default_kwargs )
a : Dict = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
for scheduler_class in self.scheduler_classes:
a : Tuple = self.get_scheduler_config()
a : int = scheduler_class(**lowerCAmelCase__ )
a : Any = self.dummy_sample
a : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase__ , "set_timesteps" ):
scheduler.set_timesteps(lowerCAmelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCAmelCase__ , "set_timesteps" ):
a : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
a : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
a : Union[str, Any] = scheduler.timesteps[5]
a : List[str] = scheduler.timesteps[6]
a : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
a : Optional[int] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __a ( self ) -> Dict:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
a : List[str] = DEISMultistepScheduler(**self.get_scheduler_config() )
a : Optional[Any] = self.full_loop(scheduler=lowerCAmelCase__ )
a : List[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
a : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a : int = DPMSolverMultistepScheduler.from_config(scheduler.config )
a : Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config )
a : Tuple = DEISMultistepScheduler.from_config(scheduler.config )
a : List[str] = self.full_loop(scheduler=lowerCAmelCase__ )
a : Dict = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def __a ( self ) -> Dict:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
self.check_over_configs(thresholding=lowerCAmelCase__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , sample_max_value=lowerCAmelCase__ , algorithm_type="deis" , solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , )
def __a ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def __a ( self ) -> Optional[int]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , algorithm_type=lowerCAmelCase__ , )
a : Optional[Any] = self.full_loop(
solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , algorithm_type=lowerCAmelCase__ , )
assert not torch.isnan(lowerCAmelCase__ ).any(), "Samples have nan numbers"
def __a ( self ) -> Dict:
self.check_over_configs(lower_order_final=lowerCAmelCase__ )
self.check_over_configs(lower_order_final=lowerCAmelCase__ )
def __a ( self ) -> Any:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCAmelCase__ , time_step=0 )
def __a ( self ) -> Any:
a : Optional[Any] = self.full_loop()
a : Dict = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def __a ( self ) -> Optional[Any]:
a : Dict = self.full_loop(prediction_type="v_prediction" )
a : int = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def __a ( self ) -> Union[str, Any]:
a : Optional[Any] = self.scheduler_classes[0]
a : Optional[Any] = self.get_scheduler_config(thresholding=lowerCAmelCase__ , dynamic_thresholding_ratio=0 )
a : Tuple = scheduler_class(**lowerCAmelCase__ )
a : List[Any] = 10
a : Union[str, Any] = self.dummy_model()
a : Union[str, Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a : List[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
        assert sample.dtype == torch.float16
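# Hedged stand-alone sketch (not one of the test cases above): the config
# save/restore round trip the tests rely on, runnable with `diffusers` alone.
if __name__ == "__main__":
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    with tempfile.TemporaryDirectory() as tmpdir:
        scheduler.save_config(tmpdir)
        restored = DEISMultistepScheduler.from_pretrained(tmpdir)
    # a restored scheduler must expose the same config values
    assert restored.config.solver_order == scheduler.config.solver_order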
| 633 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : int = logging.get_logger(__name__)
class __UpperCamelCase ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PIL.Image.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 255 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
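# Hedged usage sketch: running the processor above (the class keeps this file's
# obfuscated name `__UpperCamelCase`) on a dummy image; the output spatial size
# should match the 224x224 center crop configured in __init__.
if __name__ == "__main__":
    dummy = PIL.Image.new("RGB", (300, 300))
    processor = __UpperCamelCase()
    pixel_values = processor(images=dummy)["pixel_values"][0]
    print(pixel_values.shape)  # expected: (3, 224, 224)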
| 633 | 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase__ =pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase__ =dataset.iloc[:, 1:2].values
lowerCAmelCase__ =dataset.iloc[:, 2].values
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ =train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase__ =PolynomialFeatures(degree=4)
lowerCAmelCase__ =poly_reg.fit_transform(X)
lowerCAmelCase__ =LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' )
plt.plot(UpperCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCAmelCase__ ) ) , color='''blue''' )
plt.title('''Truth or Bluff (Linear Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
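    # Hedged follow-up sketch: a plain linear fit on the same data, for
    # contrast with the degree-4 prediction above.
    lin_reg = LinearRegression()
    lin_reg.fit(X, y)
    print("linear prediction:", lin_reg.predict([[5.5]]))
    print("degree-4 prediction:", pol_reg.predict(poly_reg.fit_transform([[5.5]])))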
| 690 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase__ ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
lowerCAmelCase__ ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
lowerCAmelCase__ ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 690 | 1 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_UpperCAmelCase : int = TypeVar('''T''')
class __magic_name__ ( Generic[T] ):
    def __init__( self , directed = True ):
        self.adj_list = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex , destination_vertex ):
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self ):
return pformat(self.adj_list )
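# Hedged demo: exercising the class above (which keeps this file's obfuscated
# name `__magic_name__`) with a tiny undirected graph; `add_edge` returns self,
# so calls chain.
if __name__ == "__main__":
    graph = __magic_name__[int](directed=False)
    graph.add_edge(0, 1).add_edge(1, 2)
    print(graph)  # expected: {0: [1], 1: [0, 2], 2: [1]}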
| 72 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''levit'''
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.0_2 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation( self ):
        return 1e-4
| 52 | 0 |
"""simple docstring"""
import math
def proth( number: int ):
    if not isinstance(number , int ):
        error_message = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(error_message )
    if number < 1:
        error_message = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(error_message )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Tuple = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _UpperCAmelCase ( PretrainedConfig ):
    model_type = "cvt"
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
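# Hedged usage sketch: instantiating the config above (the class keeps this
# file's obfuscated name `_UpperCAmelCase`) and reading back a few defaults.
if __name__ == "__main__":
    config = _UpperCAmelCase()
    print(config.model_type)  # 'cvt'
    print(config.embed_dim)   # [64, 192, 384]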
| 397 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _a ( Pipeline ):
'''simple docstring'''
    def __init__( self, *args, **kwargs ):
        super().__init__(*args, **kwargs )
        requires_backends(self, 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self, top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params
    def __call__( self, images, **kwargs ):
        return super().__call__(images, **kwargs )
    def preprocess( self, image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image, return_tensors=self.framework )
        return model_inputs
    def _forward( self, model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self, model_outputs, top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1 )[0]
            topk = tf.math.top_k(probs, k=top_k )
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids )]
| 28 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
a__ : Optional[int] = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class lowercase ( PipelineTool ):
    """simple docstring"""
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode( self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="""pt""" , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward( self , inputs ):
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
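# Hedged usage sketch (downloads the NLLB checkpoint on first call; the sample
# sentence is illustrative). The class keeps this file's obfuscated name
# `lowercase`:
#
#     translator = lowercase()
#     print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))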
| 165 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCamelCase_ = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf( model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
    config_class , model_class , pt_model_class , aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file , CONFIG_NAME , force_download=not use_cached_models )
    config = config_class.from_json_file(config_file )
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'''Building TensorFlow model from configuration: {config}''' )
    tf_model = model_class(config )
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path , WEIGHTS_NAME , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model , pytorch_checkpoint_path )
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs , training=False )  # build the network
        state_dict = torch.load(pytorch_checkpoint_path , map_location='''cpu''' )
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs )
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf ) )
        print(f'''Max absolute difference between models outputs {diff}''' )
        assert diff <= 2E-2 , f'''Error, model absolute difference is >2e-2: {diff}'''
    # Save pytorch-model
    print(f'''Save TensorFlow model to {tf_dump_path}''' )
    tf_model.save_weights(tf_dump_path , save_format='''h5''' )
def convert_all_pt_checkpoints_to_tf( args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys() )
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types , start=1 ):
        print('''=''' * 100 )
        print(f''' Converting model type {j}/{len(model_types )}: {model_type}''' )
        print('''=''' * 100 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
        config_class , model_class , pt_model_class , aws_model_maps , aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path , config_shortcut_names_or_path ) , start=1 ):
            print('''-''' * 100 )
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
                continue
            print(
                f''' Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}''' )
            print('''-''' * 100 )
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name , CONFIG_NAME , force_download=not use_cached_models )
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name , WEIGHTS_NAME , force_download=not use_cached_models )
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name ):
                model_shortcut_name = '''converted_model'''
            convert_pt_checkpoint_to_tf(
                model_type=model_type , pytorch_checkpoint_path=model_file , config_file=config_file , tf_dump_path=os.path.join(tf_dump_path , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=compare_with_pt_model , )
            if remove_cached_files:
                os.remove(config_file )
                os.remove(model_file )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
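
    # Example invocation (hypothetical paths, assuming this script is saved as
    # convert_pytorch_checkpoint_to_tf2.py):
    #   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
    #       --pytorch_checkpoint_path ./bert-pytorch_model.bin \
    #       --config_file ./bert-config.json --tf_dump_path ./tf_dump --compare_with_pt_model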
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
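
# Example invocation (a sketch; exact flag names come from TensorFlowBenchmarkArguments
# and may differ between library versions):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128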
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
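
# Example: evaluate("Helxo Worlx", "Hello World") returns ("Helxo Worlx", 9.0),
# since 9 of the 11 characters already match their target position.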
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])
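
# Worked example: solve(["5", "6", "9", "*", "+"]) pushes 5, 6 and 9,
# replaces 6 and 9 with 6*9 = 54, then replaces 5 and 54 with 5+54 = 59,
# so the function returns 59 (alongside the printed trace table).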
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix)) | 329 | 0 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
"""simple docstring"""
def solution(n: int = 1_000_000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
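
# Worked example: the Collatz chain starting at 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 (10 terms),
# and solution(1_000_000) yields 837799, the published Project Euler 14 answer.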
if __name__ == "__main__":
print(solution(int(input().strip())))
| 159 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    """Calculate the expected number of distinct colours among num_picked balls."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
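
# By linearity of expectation each of the 7 colours contributes
# P(colour present) = 1 - C(60, 20) / C(70, 20), so solution() evaluates to
# approximately 6.818741802 (the published Project Euler 493 result).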
if __name__ == "__main__":
print(solution(20))
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """
    A class representing an undirected weighted graph.
    """

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """
        Add a new edge to the graph.
        """
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """
        Run Prim's algorithm to find the minimum spanning tree.
        """
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def _a ( __lowercase = "p107_network.txt" ) -> int:
"""simple docstring"""
__UpperCamelCase = os.path.abspath(os.path.dirname(__lowercase ) )
__UpperCamelCase = os.path.join(__lowercase , __lowercase )
__UpperCamelCase = {}
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
with open(__lowercase ) as f:
__UpperCamelCase = f.read().strip().split('\n' )
__UpperCamelCase = [line.split(',' ) for line in data]
for edgea in range(1 , len(__lowercase ) ):
for edgea in range(__lowercase ):
if adjaceny_matrix[edgea][edgea] != "-":
__UpperCamelCase = int(adjaceny_matrix[edgea][edgea] )
__UpperCamelCase = Graph(set(range(len(__lowercase ) ) ) , __lowercase )
__UpperCamelCase = graph.prims_algorithm()
__UpperCamelCase = sum(graph.edges.values() )
__UpperCamelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
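
# Note: prims_algorithm() scans every edge once per added vertex, i.e. O(V * E);
# more than fast enough for the 40-vertex network in the Project Euler data file.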
if __name__ == "__main__":
print(F'''{solution() = }''')
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def _a ( __lowercase , __lowercase="robotic cat with wings" , __lowercase=7.5 , __lowercase=50 , __lowercase=1 , __lowercase=42 , ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = torch.Generator(pipeline.device ).manual_seed(__lowercase )
__UpperCamelCase = pipeline(
__lowercase , guidance_scale=__lowercase , num_inference_steps=__lowercase , generator=__lowercase , num_images_per_prompt=__lowercase , ).images
__UpperCamelCase = int(math.sqrt(__lowercase ) )
__UpperCamelCase = image_grid(__lowercase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))

dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))