"""Image processor class for CLIP."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so that the shortest edge of the image matches size["shortest_edge"],
        # preserving the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
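
# Usage sketch (added for illustration; assumes this class is exported from
# transformers as the standard CLIP image processor):
#
#     import numpy as np
#     from transformers import CLIPImageProcessor
#
#     processor = CLIPImageProcessor()
#     image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     assert batch["pixel_values"].shape == (1, 3, 224, 224)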

"""Tests for the dataset search indexes (FAISS and Elasticsearch)."""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
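
# For reference, the raw faiss calls that `FaissIndex` wraps look roughly like
# this (added sketch; plain faiss API, independent of the datasets wrapper):
#
#     import faiss
#     import numpy as np
#
#     index = faiss.IndexFlatIP(5)            # inner-product index of dim 5
#     index.add(np.eye(5, dtype=np.float32))  # add five one-hot vectors
#     scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 1)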

"""
Project Euler Problem 10: https://projecteuler.net/problem=10

Find the sum of all the primes below two million.
"""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True if `number` is a prime, False otherwise."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, so it is enough to
    # test divisors of that form up to sqrt(number).
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes strictly below `n`."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
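
# Quick sanity checks for the helpers above (added sketch; only uses the
# function names defined in this file):
#
#     >>> is_prime(97)
#     True
#     >>> is_prime(49)
#     False
#     >>> solution(10)  # 2 + 3 + 5 + 7
#     17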

"""Fast tests for the DeepFloyd IF inpainting pipeline."""
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires higher tolerance, as the pipeline runs in float16.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

"""Tests that the `by_feature` accelerate examples stay in sync with the complete examples."""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line. If one fails,
    then a complete example does not contain all of the features in the feature
    scripts, and should be updated.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)

"""Tests for the OpenAI GPT model."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)

"""Tests for the DanceDiffusion pipeline."""
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
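
# End-to-end usage mirroring the integration test above (added sketch; the
# checkpoint name and call arguments come straight from the test):
#
#     import torch
#     from diffusers import DanceDiffusionPipeline
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#     generator = torch.manual_seed(0)
#     output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
#     audio = output.audios[0]  # numpy array of shape (2, num_samples)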

"""
Preprocessing script before distillation: tokenize a raw text dump and
serialize the token ids to a pickle file.
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    count = 0
    interval = 10000
    start = time.time()
    for text in data:
        # Wrap each line with the model-specific BOS/SEP tokens before encoding.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Use the smallest integer dtype that can hold every token id.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
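
# Example invocation (added sketch; the flags are the ones defined by the
# argument parser above, while the script filename is assumed):
#
#     python binarized_data.py \
#         --file_path data/dump.txt \
#         --tokenizer_type bert \
#         --tokenizer_name bert-base-uncased \
#         --dump_file data/binarized_text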

"""Tests for the score-based SDE (VE) pipeline."""
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
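
# The integration test above roughly corresponds to this user-facing flow
# (added sketch; whether the checkpoint repo hosts a full pipeline config is
# an assumption, since the test itself loads the UNet and scheduler separately):
#
#     import torch
#     from diffusers import ScoreSdeVePipeline
#
#     sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     sde_ve.to("cuda")
#     image = sde_ve(num_inference_steps=10, output_type="numpy",
#                    generator=torch.manual_seed(0)).images[0]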

"""BERT model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
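
# Typical usage (added sketch; relies on the public transformers API only):
#
#     from transformers import BertConfig, BertModel
#
#     config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#     model = BertModel(config)  # randomly initialised, not pretrained
#     assert model.config.hidden_size == 256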
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowercase_ = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
lowercase_ = {'''facebook/blenderbot-3B''': 128}
class __UpperCamelCase ( a__ ):
"""simple docstring"""
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : List[str] , _A : Optional[Any]=None , _A : Optional[Any]=None , _A : Optional[int]=None , _A : Tuple="replace" , _A : Tuple="<s>" , _A : Dict="</s>" , _A : Tuple="</s>" , _A : List[Any]="<s>" , _A : Optional[int]="<unk>" , _A : List[str]="<pad>" , _A : Dict="<mask>" , _A : Optional[Any]=False , _A : Optional[int]=True , **_A : Optional[int] , ):
"""simple docstring"""
super().__init__(
_snake_case , _snake_case , tokenizer_file=_snake_case , errors=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case , **_snake_case , )
__SCREAMING_SNAKE_CASE : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _snake_case ) != add_prefix_space:
__SCREAMING_SNAKE_CASE : int = getattr(_snake_case , pre_tok_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
__SCREAMING_SNAKE_CASE : int = pre_tok_class(**_snake_case )
__SCREAMING_SNAKE_CASE : Dict = add_prefix_space
__SCREAMING_SNAKE_CASE : Tuple = '''post_processor'''
__SCREAMING_SNAKE_CASE : Tuple = getattr(self.backend_tokenizer , _snake_case , _snake_case )
if tokenizer_component_instance:
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__SCREAMING_SNAKE_CASE : str = tuple(state['''sep'''] )
if "cls" in state:
__SCREAMING_SNAKE_CASE : int = tuple(state['''cls'''] )
__SCREAMING_SNAKE_CASE : Dict = False
if state.get('''add_prefix_space''' , _snake_case ) != add_prefix_space:
__SCREAMING_SNAKE_CASE : int = add_prefix_space
__SCREAMING_SNAKE_CASE : Optional[Any] = True
if state.get('''trim_offsets''' , _snake_case ) != trim_offsets:
__SCREAMING_SNAKE_CASE : List[str] = trim_offsets
__SCREAMING_SNAKE_CASE : Dict = True
if changes_to_apply:
__SCREAMING_SNAKE_CASE : List[Any] = getattr(_snake_case , state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : List[str] = component_class(**_snake_case )
setattr(self.backend_tokenizer , _snake_case , _snake_case )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : List[str] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else value
__SCREAMING_SNAKE_CASE : Tuple = value
def UpperCAmelCase__ ( self : str , *_A : int , **_A : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = kwargs.get('''is_split_into_words''' , _snake_case )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_snake_case , **_snake_case )
def UpperCAmelCase__ ( self : Union[str, Any] , *_A : str , **_A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = kwargs.get('''is_split_into_words''' , _snake_case )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_snake_case , **_snake_case )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : str , _A : List[Any] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
def UpperCAmelCase__ ( self : List[Any] , _A : Tuple , _A : List[str] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : str , _A : Union[str, Any] , _A : str = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done inside Blenderbot
inputs.append(''' ''' + text )
else:
                # Generated responses already contain the prefix space.
inputs.append(_snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = ''' '''.join(_snake_case )
__SCREAMING_SNAKE_CASE : Dict = self.encode(_snake_case )
if len(_snake_case ) > self.model_max_length:
__SCREAMING_SNAKE_CASE : List[Any] = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
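# A standalone sketch (not part of the tokenizer class above) of the post-processor
# rebuild pattern from __init__: read the serialized component state, patch the
# flags, and re-instantiate the component class. The state dict below is
# illustrative, and passing `sep`/`cls` as keywords is assumed from the layout of
# the serialized state.
from tokenizers import processors
state = {"type": "RobertaProcessing", "sep": ("</s>", 2), "cls": ("<s>", 0), "trim_offsets": True, "add_prefix_space": False}
component_class = getattr(processors , state.pop("type" ) )
post_processor = component_class(**state )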
| 74 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
snake_case__ = False
snake_case__ = False
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
    def test_dance_diffusion( self ):
        """simple docstring"""
        device = 'cpu' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
    def test_save_load_local( self ):
        """simple docstring"""
        return super().test_save_load_local()
    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        """simple docstring"""
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    @skip_mps
    def test_save_load_optional_components( self ):
        """simple docstring"""
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        return super().test_attention_slicing_forward_pass()
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dance_diffusion( self ):
        """simple docstring"""
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    def test_dance_diffusion_fp16( self ):
        """simple docstring"""
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
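    # A small sketch of the seeding pattern used by get_dummy_inputs in the fast
    # tests above: MPS does not support device-local torch.Generator objects, so
    # the code falls back to seeding the global RNG there. This helper is
    # illustrative, not part of the original test class.
    @staticmethod
    def make_generator(device , seed=0 ):
        if str(device ).startswith('mps' ):
            return torch.manual_seed(seed )  # seeds and returns the global RNG
        return torch.Generator(device=device ).manual_seed(seed )  # device-local RNG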
| 4 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name ( a ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__A : str = model_type_to_module_name(_UpperCAmelCase )
__A : Dict = importlib.import_module(F""".{module_name}""" , 'transformers.models' )
try:
return getattr(_UpperCAmelCase , _UpperCAmelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_UpperCAmelCase , '__name__' , _UpperCAmelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__A : List[Any] = importlib.import_module('transformers' )
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
return getattr(_UpperCAmelCase , _UpperCAmelCase )
return None
def get_image_processor_config ( a , a = None , a = False , a = False , a = None , a = None , a = None , a = False , **a , ) -> Any:
__A : Optional[int] = get_file_from_repo(
_UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(_UpperCAmelCase , encoding='utf-8' ) as reader:
return json.load(_UpperCAmelCase )
class AutoImageProcessor:
"""simple docstring"""
def __init__( self ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , _A , **_A ):
__A : Tuple = kwargs.pop('config' , _snake_case )
__A : Tuple = kwargs.pop('trust_remote_code' , _snake_case )
__A : str = True
__A , __A : Optional[Any] = ImageProcessingMixin.get_image_processor_dict(_snake_case , **_snake_case )
__A : Optional[int] = config_dict.get('image_processor_type' , _snake_case )
__A : Union[str, Any] = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
__A : Dict = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__A : Optional[int] = config_dict.pop('feature_extractor_type' , _snake_case )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
__A : List[str] = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
__A : Tuple = config_dict['auto_map']['AutoFeatureExtractor']
__A : Any = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_snake_case , _snake_case ):
__A : Union[str, Any] = AutoConfig.from_pretrained(_snake_case , **_snake_case )
            # It could be in `config.image_processor_type`
__A : Dict = getattr(_snake_case , 'image_processor_type' , _snake_case )
if hasattr(_snake_case , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
__A : int = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
__A : List[str] = image_processor_class_from_name(_snake_case )
__A : Optional[int] = image_processor_auto_map is not None
__A : Any = image_processor_class is not None or type(_snake_case ) in IMAGE_PROCESSOR_MAPPING
__A : str = resolve_trust_remote_code(
_snake_case , _snake_case , _snake_case , _snake_case )
if has_remote_code and trust_remote_code:
__A : Any = get_class_from_dynamic_module(
_snake_case , _snake_case , **_snake_case )
__A : Union[str, Any] = kwargs.pop('code_revision' , _snake_case )
if os.path.isdir(_snake_case ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_snake_case , **_snake_case )
elif image_processor_class is not None:
return image_processor_class.from_dict(_snake_case , **_snake_case )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_snake_case ) in IMAGE_PROCESSOR_MAPPING:
__A : Dict = IMAGE_PROCESSOR_MAPPING[type(_snake_case )]
return image_processor_class.from_dict(_snake_case , **_snake_case )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
    def register( _A , _A ):
IMAGE_PROCESSOR_MAPPING.register(_snake_case , _snake_case )
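# Hedged usage sketch for the Auto class above (assumes network access and that
# the checkpoint name resolves on the Hub):
# processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# type(processor).__name__  # 'ViTImageProcessor', per the "vit" entry in the mapping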
| 239 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
lowerCAmelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and next attention mask
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
    def test_model_various_embeddings( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
    def test_open_llama_sequence_classification_model( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self , scaling_type ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = OpenLlamaModel(_snake_case )
original_model.to(_snake_case )
original_model.eval()
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase = OpenLlamaModel(_snake_case )
scaled_model.to(_snake_case )
scaled_model.eval()
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
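        # Minimal sketch of the rope_scaling wiring exercised above, mirroring the
        # dict set a few lines back; everything else is illustrative:
        #   config = OpenLlamaConfig()
        #   config.rope_scaling = {'type': 'dynamic', 'factor': 10.0}
        #   scaled_model = OpenLlamaModel(config)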
| 4 | 0 |
'''simple docstring'''
class MaxFenwickTree :
    def __init__( self , size : int ):
        '''simple docstring'''
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index : int ):
        '''simple docstring'''
        return index | (index + 1)
    @staticmethod
    def get_prev( index : int ):
        '''simple docstring'''
        return (index & (index + 1)) - 1
    def update( self , index : int , value : int ):
        '''simple docstring'''
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value , self.tree[index] )
            index = self.get_next(index )
    def query( self , left : int , right : int ):
        '''simple docstring'''
        right -= 1 # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
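    # The two bit tricks above drive this Fenwick-style structure:
    # get_next(i) = i | (i + 1) jumps to the next index whose stored range covers i,
    # get_prev(i) = (i & (i + 1)) - 1 jumps just past the left border of i's range.
    # A quick illustration:
    for i in (0, 1, 2, 5, 6):
        print(i, i | (i + 1), (i & (i + 1)) - 1)
    # -> (0, 1, -1), (1, 3, -1), (2, 3, 1), (5, 7, 3), (6, 7, 5)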
| 109 |
"""simple docstring"""
from typing import Any
class Node :
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.next = None
    def __repr__( self ):
        """simple docstring"""
        return F'Node({self.data})'
class LinkedList :
    def __init__( self ):
        """simple docstring"""
        self.head = None
    def __iter__( self ):
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ):
        """simple docstring"""
        return sum(1 for _ in self )
    def __repr__( self ):
        """simple docstring"""
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ):
        """simple docstring"""
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ):
        """simple docstring"""
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ):
        """simple docstring"""
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        """simple docstring"""
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        """simple docstring"""
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ): # print every node data
        """simple docstring"""
        print(self )
    def delete_head( self ):
        """simple docstring"""
        return self.delete_nth(0 )
    def delete_tail( self ): # delete from tail
        """simple docstring"""
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        """simple docstring"""
        if not 0 <= index <= len(self ) - 1: # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        """simple docstring"""
        return self.head is None
    def reverse( self ):
        """simple docstring"""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list ():
    linked_list = LinkedList()
assert linked_list.is_empty() is True
assert str(_UpperCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_UpperCAmelCase ) == i
linked_list.insert_nth(_UpperCAmelCase , i + 1 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_UpperCAmelCase ) == 9
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
        linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2 ():
    test_input = [
-9,
100,
Node(7734_5112 ),
'dlrow olleH',
7,
5555,
0,
-192.5_5555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
    linked_list = LinkedList()
for i in test_input:
linked_list.insert_tail(_UpperCAmelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
    result = linked_list.delete_head()
assert result == -9
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
    result = linked_list.delete_tail()
assert result == 12.2
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
assert result is None
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_UpperCAmelCase )
assert (
str(_UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_UpperCAmelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main ():
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_UpperCAmelCase )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
    linked_list[1] = input('Enter New Value: ' ).strip()
print('New list:' )
print(_UpperCAmelCase )
print(F'length of linked_list is : {len(_UpperCAmelCase )}' )
if __name__ == "__main__":
main()
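# The reverse() method above is the classic three-pointer reversal; a standalone
# sketch with a minimal assumed node shape (any object with a `.next` attribute):
def reverse_list(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # stash the rest of the list
        current.next = prev       # point this node backwards
        prev = current            # step prev forward
        current = next_node       # step current forward
    return prev                   # new head of the reversed list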
| 4 | 0 |
def dodecahedron_surface_area(edge):
    """
    Surface area of a regular dodecahedron with the given edge length.
    >>> round(dodecahedron_surface_area(5), 2)
    516.14
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive." )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge):
    """
    Volume of a regular dodecahedron with the given edge length.
    >>> round(dodecahedron_volume(5), 2)
    957.89
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive." )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 623 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story (story_id : str ):
    url = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url ).json()
def hackernews_top_stories (max_stories : int = 10 ):
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown (max_stories : int = 10 ):
    stories = hackernews_top_stories(max_stories )
    return "\n".join('* [{title}]({url})'.format(**story ) for story in stories )
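# Note on the format call above: each story dict must contain 'title' and 'url'
# keys, so a self-post without a 'url' field would raise a KeyError here. Example
# of the star-unpack formatting:
# '* [{title}]({url})'.format(**{"title": "Example", "url": "https://example.com"})
# -> '* [Example](https://example.com)'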
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width ( height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
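# Worked examples for downscale_height_and_width with scale_factor=8:
#   height=768: 768 // 64 = 12 with no remainder -> 12 * 8 = 96
#   height=500: 500 // 64 = 7 with a remainder   -> (7 + 1) * 8 = 64
# i.e. the latent size is height / scale_factor, rounded up to a whole multiple
# of scale_factor.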
class KandinskyV22ControlnetPipeline ( DiffusionPipeline ):
def __init__(self , lowercase , lowercase , lowercase , ):
super().__init__()
self.register_modules(
unet=_snake_case , scheduler=_snake_case , movq=_snake_case , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents (self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload (self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(F'cuda:{gpu_id}' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload (self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(F'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=False )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device (self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_snake_case , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_snake_case )
    def __call__(self , image_embeds , negative_image_embeds , hint , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height ,width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latents
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds, """hint""": hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=image_embeds , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
if do_classifier_free_guidance:
                noise_pred ,variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond ,noise_pred_text = noise_pred.chunk(2 )
                _ ,variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
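                # classifier-free guidance above: start from the unconditional
                # prediction and move `guidance_scale` times further along the
                # text-conditioned direction, then re-attach the predicted variance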
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                noise_pred ,_ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
# post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=image )
| 667 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config (checkpoint_url ):
    config = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 48
lowerCAmelCase = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 60
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase = 1
lowerCAmelCase = 1
lowerCAmelCase = 126
lowerCAmelCase = 7
lowerCAmelCase = 255.0
lowerCAmelCase = ''
return config
def rename_key (name , config ):
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
lowerCAmelCase = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
lowerCAmelCase = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
lowerCAmelCase = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
lowerCAmelCase = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
lowerCAmelCase = 'layernorm.weight'
if name == "norm.bias":
lowerCAmelCase = 'layernorm.bias'
if "conv_first" in name:
lowerCAmelCase = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' )
lowerCAmelCase = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
lowerCAmelCase = 'swin2sr.' + name
return name
def convert_state_dict (orig_state_dict , config ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
lowerCAmelCase = key.split('.' )
lowerCAmelCase = int(key_split[1] )
lowerCAmelCase = int(key_split[4] )
lowerCAmelCase = config.embed_dim
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[dim : dim * 2, :]
lowerCAmelCase = val[-dim:, :]
else:
lowerCAmelCase = val[:dim]
lowerCAmelCase = val[dim : dim * 2]
lowerCAmelCase = val[-dim:]
pass
else:
            orig_state_dict[rename_key(key , config )] = val
return orig_state_dict
def convert_swinasr_checkpoint (checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys ,unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
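    # Minimal sketch of the fused-QKV split performed in convert_state_dict above:
    # a (3*dim, dim) projection weight is sliced into query/key/value blocks.
    dim = 4
    qkv_weight = torch.randn(3 * dim, dim)
    query_w = qkv_weight[:dim, :]
    key_w = qkv_weight[dim : dim * 2, :]
    value_w = qkv_weight[-dim:, :]
    assert query_w.shape == key_w.shape == value_w.shape == (dim, dim)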
| 4 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax", "transformers"]
def __init__( self: int , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["flax", "transformers"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: List[Any] , *_SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Tuple , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: int) -> int:
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"])
class A__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax", "transformers"]
def __init__( self: List[Any] , *_SCREAMING_SNAKE_CASE: Union[str, Any] , **_SCREAMING_SNAKE_CASE: Dict) -> int:
"""simple docstring"""
requires_backends(self , ["flax", "transformers"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Union[str, Any] , *_SCREAMING_SNAKE_CASE: Any , **_SCREAMING_SNAKE_CASE: Any) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Tuple , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"])
class A__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax", "transformers"]
def __init__( self: Optional[int] , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Any) -> str:
"""simple docstring"""
requires_backends(self , ["flax", "transformers"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: int , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Any) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: int , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"])
class A__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax", "transformers"]
def __init__( self: Any , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["flax", "transformers"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Tuple , *_SCREAMING_SNAKE_CASE: Any , **_SCREAMING_SNAKE_CASE: Any) -> Dict:
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"]) | 293 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"
    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
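# Minimal usage sketch (not part of the original file): instantiate the config with
# defaults and override a single field; attribute names follow the __init__ above.
#   config = MegatronBertConfig(num_hidden_layers=12)
#   assert config.hidden_size == 1024 and config.num_hidden_layers == 12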
| 4 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 152 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
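# Usage sketch (an assumption, mirroring the public generation API): passing a nested
# list via `force_words_ids` makes `generate` build a DisjunctiveConstraint internally,
# forcing one of the alternative token sequences to appear in the output.
#   outputs = model.generate(input_ids, force_words_ids=[[[1, 2, 3], [1, 2, 4]]], num_beams=4)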
| 499 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=3, sample_size=32, num_vq_embeddings=256, norm_num_groups=32, vq_embed_dim=None, scaling_factor=0.18215, norm_type="group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )
    @apply_forward_hook
    def encode(self, x, return_dict=True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)
    @apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True):
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample, return_dict=True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
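# Minimal round-trip sketch (assumes the default config above: 3 input channels):
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   reconstruction = model(x).sample  # encode -> quantize -> decode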
| 4 | 0 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    # Validate that rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
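    # Worked example (comment only): in the row [4, 3, -1, -2] the first negative sits
    # at index 2, so find_negative_index returns 2 and the row contributes 4 - 2 = 2 negatives.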
| 651 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> "Graph":
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int
    with open(network_file) as f:
        data = f.read().strip().split('\n')
    adjaceny_matrix = [line.split(',') for line in data]
    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
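    # Tiny usage sketch (hypothetical weights): in a triangle graph the minimum spanning
    # tree keeps the two lightest edges.
    #   g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    #   assert sum(g.prims_algorithm().edges.values()) == 3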
| 4 | 0 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 412 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
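# Quick sanity sketch (hypothetical numbers): three forecasts close to the actual value
# are judged safe by the voting check above.
#   assert data_safety_checker([0.20, 0.22, 0.19], 0.21) is True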
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['total_user', 'total_even', 'days']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = '' if data_safety_checker(res_vote, tst_user[0]) else 'not '
    print(f"Today's data is {not_str}safe.")
| 4 | 0 |
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
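# A graph with an odd cycle is not bipartite (sketch, not part of the original snippet):
#   print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False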
| 63 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.', )
    parser.add_argument(
        '-c', '--caption', type=str, default='robotic cat with wings', help='Text used to generate images.', )
    parser.add_argument(
        '-n', '--images_num', type=int, default=4, help='How much images to generate.', )
    parser.add_argument(
        '-s', '--seed', type=int, default=42, help='Seed for random process.', )
    parser.add_argument(
        '-ci', '--cuda_id', type=int, default=0, help='cuda_id.', )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
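# Example invocation (sketch; the script file name is an assumption):
#   python text2images.py -m ./quantized-sd-model-dir -c "robotic cat with wings" -n 4 -s 42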
| 4 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)
    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)
    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)
    def total_steps(self) -> int:
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")
    def train_dataloader(self):
        return self.train_loader
    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir, "cached_{}_{}_{}".format(
                mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint):
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
        parser.add_argument(
            "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)
    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", )
def generic_train(model, args, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
    pl.seed_everything(args.seed)
    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"
    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )
    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
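# Sketch of the intended call pattern (`MyTaskModel` is a hypothetical BaseTransformer
# subclass that implements `get_dataloader`):
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   MyTaskModel.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskModel(args), args)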
| 74 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f'{len(dest_layers)} != {len(layers_to_copy)}'
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
                f' {n_student}' )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher):
    if n_student > n_teacher:
        raise ValueError(f'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}')
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(teacher: Union[str, PreTrainedModel], save_path: Union[str, Path] = "student", e: Union[int, None] = None, d: Union[int, None] = None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs):
    _msg = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f'teacher must be a model or string got type {type(teacher)}'
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
    except AttributeError:  # T5
        if hasattr(teacher.config , 'num_encoder_layers' ):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config , 'num_encoder_layers' ):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
            f' {save_path}' )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher , 'prophetnet' ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy)
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy)
    logger.info(
        f'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
    student.config.init_metadata = {
        'teacher_type': teacher.config.model_type,
        'copied_encoder_layers': e_layers_to_copy,
        'copied_decoder_layers': d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
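    # Example CLI invocation via fire (a sketch; model name and layer counts are hypothetical):
    #   python make_student.py facebook/bart-large-cnn ./student --e 6 --d 3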
| 4 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'marian'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, activation_dropout=0.0, attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
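# Minimal usage sketch (not part of the original file): the decoder vocab defaults to
# the encoder vocab when `decoder_vocab_size` is left as None.
#   config = MarianConfig()
#   assert config.decoder_vocab_size == config.vocab_size == 58101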
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 239 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
a = logging.getLogger()
def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , """all_results.json""" )
if os.path.exists(_UpperCAmelCase ):
with open(_UpperCAmelCase , """r""" ) as f:
__SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase )
else:
raise ValueError(f"""can\'t find {path}""" )
return results
a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __a ( a__ ):
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
import xla_spawn
__SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
__SCREAMING_SNAKE_CASE = f"""\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()
with patch.object(_snake_case ,"""argv""" ,_snake_case ):
__SCREAMING_SNAKE_CASE = time()
xla_spawn.main()
__SCREAMING_SNAKE_CASE = time()
__SCREAMING_SNAKE_CASE = get_results(_snake_case )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start ,500 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
import xla_spawn
__SCREAMING_SNAKE_CASE = """\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n """.split()
with patch.object(_snake_case ,"""argv""" ,_snake_case ):
xla_spawn.main()
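# Hedged sketch of the test pattern above: any argparse-based entry
# point can be driven in-process by patching sys.argv before main().
import sys
from unittest.mock import patch

def sketch_run_cli(main_fn, argv):
    with patch.object(sys, "argv", ["prog"] + list(argv)):
        return main_fn()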
| 109 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ):
lowerCAmelCase = 0.00
lowerCAmelCase = 0
for resistor in resistors:
if resistor <= 0:
lowerCAmelCase = F'Resistor at index {index} has a negative or zero value!'
raise ValueError(_UpperCAmelCase )
first_sum += 1 / float(_UpperCAmelCase )
index += 1
return 1 / first_sum
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ):
lowerCAmelCase = 0.00
lowerCAmelCase = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowerCAmelCase = F'Resistor at index {index} has a negative value!'
raise ValueError(_UpperCAmelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
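# Hedged sketch (the helper names above are masked): standalone
# equivalents of the parallel and series resistance calculators,
# with a quick self-check.
def sketch_parallel(resistors):
    return 1 / sum(1 / r for r in resistors)  # 1/Req = sum(1/Ri)

def sketch_series(resistors):
    return sum(resistors)  # Req = sum(Ri)

assert abs(sketch_parallel([5.0, 10.0, 10.0]) - 2.5) < 1e-9
assert sketch_series([5.0, 10.0, 10.0]) == 25.0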
| 4 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase = TextDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read()
_check_text_dataset(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
lowerCamelCase = features.copy() if features else default_expected_features
lowerCamelCase = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase = TextDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_text_dataset(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
lowerCamelCase = TextDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , split=_UpperCAmelCase ).read()
_check_text_dataset(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
if issubclass(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase = text_path
elif issubclass(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase = [text_path]
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
lowerCamelCase = TextDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_text_dataset(_UpperCAmelCase , _UpperCAmelCase )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=("train",) ):
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
for split in splits:
lowerCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase = TextDatasetReader({"train": text_path} , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read()
_check_text_datasetdict(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
    # the "text" builder yields a single "text" column with dtype "string" by default
lowerCamelCase = {"text": "string"}
lowerCamelCase = features.copy() if features else default_expected_features
lowerCamelCase = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase = TextDatasetReader({"train": text_path} , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_text_datasetdict(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
if split:
lowerCamelCase = {split: text_path}
else:
lowerCamelCase = "train"
lowerCamelCase = {"train": text_path, "test": text_path}
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
lowerCamelCase = TextDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_text_datasetdict(_UpperCAmelCase , _UpperCAmelCase , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
| 623 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class a ( a__ ):
snake_case__ = '''glpn'''
def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = num_channels
lowerCAmelCase = num_encoder_blocks
lowerCAmelCase = depths
lowerCAmelCase = sr_ratios
lowerCAmelCase = hidden_sizes
lowerCAmelCase = patch_sizes
lowerCAmelCase = strides
lowerCAmelCase = mlp_ratios
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = drop_path_rate
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = decoder_hidden_size
lowerCAmelCase = max_depth
lowerCAmelCase = head_in_index
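# Hedged usage sketch (assumes the transformers package is installed):
# the class above is a regular PretrainedConfig subclass, so defaults
# can be overridden per keyword at construction time.
from transformers import GLPNConfig

sketch_config = GLPNConfig(num_channels=3, decoder_hidden_size=64)
assert sketch_config.model_type == "glpn"
assert len(sketch_config.depths) == sketch_config.num_encoder_blocks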
| 4 | 0 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCamelCase :Union[str, Any] = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCamelCase :Any = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCamelCase :int = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCamelCase :List[Any] = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCamelCase :Dict = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCamelCase :Optional[int] = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCamelCase :Optional[int] = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def a ( ):
'''simple docstring'''
A_, A_ : Any = randrange(len(_UpperCAmelCase ) ), randrange(len(_UpperCAmelCase ) )
A_ : Union[str, Any] = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
A_, A_ : Optional[Any] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def a ( lowerCamelCase__ = 1_00 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(_UpperCAmelCase ))
@pytest.mark.parametrize("""hand, expected""" , _UpperCAmelCase )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert PokerHand(_UpperCAmelCase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , _UpperCAmelCase )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert PokerHand(_UpperCAmelCase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , _UpperCAmelCase )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = PokerHand(_UpperCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , _UpperCAmelCase )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert PokerHand(_UpperCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , _UpperCAmelCase )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert PokerHand(_UpperCAmelCase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , _UpperCAmelCase )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert PokerHand(_UpperCAmelCase ).compare_with(PokerHand(_UpperCAmelCase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert PokerHand(_UpperCAmelCase ).compare_with(PokerHand(_UpperCAmelCase ) ) == expected
def a ( ):
'''simple docstring'''
A_ : Tuple = [PokerHand(_UpperCAmelCase ) for hand in SORTED_HANDS]
A_ : str = poker_hands.copy()
shuffle(_UpperCAmelCase )
A_ : Union[str, Any] = chain(sorted(_UpperCAmelCase ) )
for index, hand in enumerate(_UpperCAmelCase ):
assert hand == poker_hands[index]
def a ( ):
'''simple docstring'''
A_ : List[Any] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=_UpperCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def a ( ):
'''simple docstring'''
A_ : str = PokerHand("""2C 4S AS 3D 5C""" )
A_ : str = True
A_ : Union[str, Any] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def a ( ):
'''simple docstring'''
A_ : Any = 0
A_ : str = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
A_ : List[Any] = os.path.join(_UpperCAmelCase , """poker_hands.txt""" )
with open(_UpperCAmelCase ) as file_hand:
for line in file_hand:
A_ : Tuple = line[:14].strip()
A_ : List[str] = line[15:].strip()
A_, A_ : Optional[Any] = PokerHand(_UpperCAmelCase ), PokerHand(_UpperCAmelCase )
A_ : str = player.compare_with(_UpperCAmelCase )
if output == "Win":
answer += 1
    assert answer == 3_76
| 667 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = range_bbox
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = t
lowerCAmelCase = tf.convert_to_tensor(_snake_case )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModel(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForMaskedLM(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFLayoutLMForTokenClassification(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = True
snake_case__ = 1_0
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFLayoutLMModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
# test the sequence output on [0, :3, :3]
lowerCAmelCase = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) )
# test the pooled output on [1, :3]
lowerCAmelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(
input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowerCAmelCase = outputs.loss
lowerCAmelCase = (2,)
self.assertEqual(loss.shape , _snake_case )
# test the shape of the logits
lowerCAmelCase = outputs.logits
lowerCAmelCase = (2, 2)
self.assertEqual(logits.shape , _snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(
input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
# test the shape of the logits
lowerCAmelCase = outputs.logits
lowerCAmelCase = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
# test the shape of the logits
lowerCAmelCase = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _snake_case )
self.assertEqual(outputs.end_logits.shape , _snake_case )
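# Hedged sketch of the bbox repair loop in the model tester above:
# every box must satisfy x0 <= x1 and y0 <= y1, which can be done
# vectorised with numpy instead of per-element swaps.
import numpy as np

def sketch_fix_bboxes(bbox):
    bbox = np.asarray(bbox)
    x = np.sort(bbox[..., [0, 2]], axis=-1)  # ascending x0, x1
    y = np.sort(bbox[..., [1, 3]], axis=-1)  # ascending y0, y1
    return np.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], axis=-1)

assert sketch_fix_bboxes([[4, 7, 2, 5]]).tolist() == [[2, 5, 4, 7]]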
| 4 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__snake_case : List[str] = None
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case : str = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__snake_case : Optional[int] = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
__snake_case : Optional[Any] = '''▁'''
class A__ ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE = BarthezTokenizer
def __init__( self: Any , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[Any]="<s>" , _SCREAMING_SNAKE_CASE: Any="</s>" , _SCREAMING_SNAKE_CASE: Optional[Any]="</s>" , _SCREAMING_SNAKE_CASE: Dict="<s>" , _SCREAMING_SNAKE_CASE: Optional[Any]="<unk>" , _SCREAMING_SNAKE_CASE: Dict="<pad>" , _SCREAMING_SNAKE_CASE: List[Any]="<mask>" , **_SCREAMING_SNAKE_CASE: Any , ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , **_snake_case , )
__lowerCAmelCase : Optional[Any] = vocab_file
__lowerCAmelCase : List[Any] = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] = None) -> str:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase : Optional[Any] = [self.cls_token_id]
__lowerCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Dict = None) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
__lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: str = None) -> Optional[Any]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(_snake_case):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
__lowerCAmelCase : Optional[int] = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case):
copyfile(self.vocab_file , _snake_case)
        return (out_vocab_file,)
| 293 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__UpperCamelCase : Union[str, Any] = '''examples/'''
__UpperCamelCase : str = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__UpperCamelCase : List[str] = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__UpperCamelCase : Optional[int] = '''README.md'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ):
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern]
lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase )
lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ):
for folder, directories, fnames in os.walk(_UpperCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern='examples' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if not patch:
update_version_in_examples(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = '🤗 Transformers currently provides the following architectures'
lowerCAmelCase = '1. Want to contribute a new model?'
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.readlines()
# Find the start of the list.
lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
lowerCAmelCase = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ():
with open(REPLACE_FILES['init'] , 'r' ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase = REPLACE_PATTERNS['init'][0].search(_UpperCAmelCase ).groups()[0]
return packaging.version.parse(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple=False ):
lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
lowerCAmelCase = default_version.base_version
elif patch:
lowerCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
lowerCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
lowerCAmelCase = input(F'Which version are you releasing? [{default_version}]' )
if len(_UpperCAmelCase ) == 0:
lowerCAmelCase = default_version
print(F'Updating version to {version}.' )
global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = get_version()
lowerCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
lowerCAmelCase = input(F'Which version are we developing now? [{dev_version}]' )
if len(_UpperCAmelCase ) == 0:
lowerCAmelCase = dev_version
print(F'Updating version to {version}.' )
global_version_update(_UpperCAmelCase )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
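# Hedged sketch of the core substitution in update_version_in_file
# above, applied to a string instead of a file on disk.
import re

def sketch_bump_init(text, version):
    pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
    return pattern.sub('__version__ = "VERSION"\n'.replace("VERSION", version), text)

assert '__version__ = "4.0.0"' in sketch_bump_init('__version__ = "4.0.0.dev0"\n', "4.0.0")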
| 4 | 0 |
'''simple docstring'''
from typing import Any
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase ) -> int:
lowercase__ : List[Any] = data
lowercase__ : Dict = None
def __repr__( self ) -> Union[str, Any]:
return F"""Node({self.data})"""
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ) -> List[str]:
lowercase__ : List[Any] = None
def __iter__( self ) -> Optional[int]:
lowercase__ : List[str] = self.head
while node:
yield node.data
lowercase__ : int = node.next
def __len__( self ) -> Dict:
return sum(1 for _ in self )
def __repr__( self ) -> Dict:
return "->".join([str(_snake_case ) for item in self] )
def __getitem__( self , __lowerCAmelCase ) -> Optional[int]:
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
lowercase__ : List[Any] = self.head
for _ in range(_snake_case ):
lowercase__ : str = current.next
lowercase__ : Optional[Any] = data
def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[str]:
self.insert_nth(len(self ) , _snake_case )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[Any]:
self.insert_nth(0 , _snake_case )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
if not 0 <= index <= len(self ):
raise IndexError('''list index out of range''' )
lowercase__ : Optional[int] = Node(_snake_case )
if self.head is None:
lowercase__ : Optional[Any] = new_node
elif index == 0:
lowercase__ : int = self.head # link new_node to head
lowercase__ : Any = new_node
else:
lowercase__ : List[Any] = self.head
for _ in range(index - 1 ):
lowercase__ : Any = temp.next
lowercase__ : Tuple = temp.next
lowercase__ : str = new_node
def _lowerCAmelCase( self ) -> Optional[Any]: # print every node data
print(self )
def _lowerCAmelCase( self ) -> Dict:
return self.delete_nth(0 )
def _lowerCAmelCase( self ) -> str: # delete from tail
return self.delete_nth(len(self ) - 1 )
def _lowerCAmelCase( self , __lowerCAmelCase = 0 ) -> int:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('''List index out of range.''' )
lowercase__ : int = self.head # default first node
if index == 0:
lowercase__ : Optional[int] = self.head.next
else:
lowercase__ : Tuple = self.head
for _ in range(index - 1 ):
lowercase__ : Tuple = temp.next
lowercase__ : Optional[Any] = temp.next
lowercase__ : Union[str, Any] = temp.next.next
return delete_node.data
def _lowerCAmelCase( self ) -> List[str]:
return self.head is None
def _lowerCAmelCase( self ) -> str:
lowercase__ : Dict = None
lowercase__ : Tuple = self.head
while current:
# Store the current node's next node.
lowercase__ : List[Any] = current.next
# Make the current node's next point backwards
lowercase__ : str = prev
# Make the previous node be the current node
lowercase__ : Union[str, Any] = current
# Make the current node the next node (to progress iteration)
lowercase__ : Tuple = next_node
# Return prev in order to put the head at the end
lowercase__ : List[str] = prev
def __UpperCamelCase ( ):
lowercase__ : Optional[int] = LinkedList()
assert linked_list.is_empty() is True
assert str(_UpperCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_UpperCAmelCase ) == i
linked_list.insert_nth(_UpperCAmelCase , i + 1 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_UpperCAmelCase ) == 9
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowercase__ : int = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) )
def __UpperCamelCase ( ):
lowercase__ : int = [
-9,
100,
Node(7734_5112 ),
'''dlrow olleH''',
7,
5555,
0,
-1_9_2.5_5_5_5_5,
'''Hello, world!''',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
lowercase__ : Any = LinkedList()
for i in test_input:
linked_list.insert_tail(_UpperCAmelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowercase__ : str = linked_list.delete_head()
assert result == -9
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowercase__ : List[str] = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowercase__ : Optional[int] = linked_list.delete_nth(10 )
assert result is None
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('''Hello again, world!''' ) )
assert (
str(_UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_UpperCAmelCase )
assert (
str(_UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_UpperCAmelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCamelCase ( ):
from doctest import testmod
testmod()
lowercase__ : int = LinkedList()
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() )
linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() )
linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nDelete head''' )
linked_list.delete_head()
print('''Delete tail''' )
linked_list.delete_tail()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nReverse linked list''' )
linked_list.reverse()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nString representation of linked list:''' )
print(_UpperCAmelCase )
print('''\nReading/changing Node data using indexing:''' )
print(F"""Element at Position 1: {linked_list[1]}""" )
lowercase__ : Union[str, Any] = input('''Enter New Value: ''' ).strip()
print('''New list:''' )
print(_UpperCAmelCase )
print(F"""length of linked_list is : {len(_UpperCAmelCase )}""" )
if __name__ == "__main__":
main()
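# Hedged usage sketch, independent of the interactive main() above
# (the class names in this file are masked; the test helpers reference
# them as LinkedList and Node):
sketch_list = LinkedList()
for value in (3, 1, 2):
    sketch_list.insert_tail(value)
assert str(sketch_list) == "3->1->2"
sketch_list.reverse()
assert str(sketch_list) == "2->1->3"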
| 152 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCamelCase : Optional[int] = pytest.mark.integration
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_snake_case ) for x in np.arange(30 ).tolist()]} )
return dset
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
lowerCAmelCase = dset.map(
lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case )
lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_snake_case )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = faiss.IndexFlat(5 )
lowerCAmelCase = FaissIndex(custom_index=_snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase = 'index.faiss'
lowerCAmelCase = F'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        """Search and batch search against a fully mocked Elasticsearch client."""
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex associated with `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version pinned by `check_min_version` in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hardcoded (init, setup and, unless this is a patch, examples)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the `main` docs by links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Bump the version before a release, asking the user to confirm the new version number."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Bump to the next dev version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
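# Typical invocations (the file name `utils/release.py` is an assumption based on
# where transformers ships this helper; adjust the path to your checkout):
#   python utils/release.py                 # bump to the release version before tagging
#   python utils/release.py --patch         # same, but for a patch release
#   python utils/release.py --post_release  # bump to the next .dev0 version afterwards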
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires a higher tolerance, as the model tends to be unstable in half precision
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dimension
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
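# A quick way to sanity-check the expected ids above (hedged sketch: assumes the
# `openai-gpt` checkpoint and its tokenizer are downloadable in your environment):
#
#   from transformers import OpenAIGPTTokenizer
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   print(tokenizer.decode(expected_output_ids))
#   # -> the president is a very good man. " \n " i'm sure he is, " said the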
class DisjointSetUnion:
    """Union-find over pre-sized sets, tracking the size of the largest merged set."""

    def __init__(self, set_counts: list) -> None:
        """Initialize with a list holding the initial element count of each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge the sets containing `src` and `dst` using union by rank.
        Returns False if they were already in the same set.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of `disj_set`, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]
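# Minimal usage sketch (the values are hypothetical, not part of the original file):
if __name__ == "__main__":
    dsu = DisjointSetUnion([1, 1, 1, 1])  # four singleton sets
    dsu.merge(0, 1)  # -> True, the merged set now counts 2 elements
    dsu.merge(2, 3)  # -> True
    dsu.merge(1, 2)  # -> True, everything is merged
    assert dsu.max_set == 4
    assert dsu.merge(0, 3) is False  # already in the same set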
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]  # token ids fit in 16 bits
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
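# Example invocation (the script file name `binarized_data.py` is an assumption based
# on the distillation example this comes from; the paths are the script's defaults):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text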
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
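# Example invocation (the checkpoint file name is illustrative; this conversion script
# is typically named convert_xlm_original_pytorch_checkpoint_to_pytorch.py):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth --pytorch_dump_folder_path ./xlm-converted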
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
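# Minimal sketch of how the ONNX config is typically consumed (`from_model_config` and
# the `task` argument are the standard legacy `transformers.onnx` API; treat the exact
# printout as illustrative):
#
#   config = BertConfig()
#   onnx_config = BertOnnxConfig.from_model_config(config, task="default")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])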
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    """Construct a MBart50 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
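# Hedged usage sketch (the checkpoint name comes from the map above; requires network
# access, and the sample sentence is illustrative):
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids start with the en_XX language-code token and end with </s>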
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
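# Token type ids produced by `create_token_type_ids_from_sequences` (illustrative):
#   single sequence:   [CLS] A [SEP]          ->  0 0 ... 0
#   pair of sequences: [CLS] A [SEP] B [SEP]  ->  0 0 ... 0 1 1 ... 1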
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
snake_case__ = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 4 | 0 |
"""Integer exponentiation by squaring, with support for negative exponents."""


def actual_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 using exponentiation by squaring."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # compute the half power once instead of recursing twice
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b for any integer exponent; negative b is delegated to 1 / a**(-b)."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
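    # A few extra sanity checks (illustrative values, assuming the functions above):
    print(power(2, 10))  # 1024
    print(power(5, 0))   # 1
    print(power(2, -2))  # 0.25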
| 109 |
"""simple docstring"""
from typing import Any
class a :
def __init__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = data
lowerCAmelCase = None
def __repr__( self ):
"""simple docstring"""
return F'Node({self.data})'
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
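# A minimal usage sketch (illustrative only; the exhaustive tests below exercise far more):
#
#     lst = LinkedList()
#     lst.insert_tail(1); lst.insert_tail(2); lst.insert_head(0)
#     assert str(lst) == "0->1->2"
#     lst.reverse()
#     assert str(lst) == "2->1->0"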
def test_singly_linked_list() -> None:
    """
    >>> test_singly_linked_list()
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """
    This section tests that the LinkedList works with objects of any type.
    >>> test_singly_linked_list_2()
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')


if __name__ == "__main__":
    main()
| 4 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Return the heap index of a node's parent."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Return the heap index of a node's left child."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Return the heap index of a node's right child."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """A binary min-heap keyed by weight, with a position map for O(1) lookups."""

    def __init__(self):
        self.heap = []
        self.position_map = {}
        self.elements = 0

    def __len__(self):
        return self.elements

    def __repr__(self):
        return str(self.heap)

    def is_empty(self):
        return self.elements == 0

    def push(self, elem, weight):
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self):
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem, weight):
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem):
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem):
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos, nodeb_pos):
        node_a = self.heap[nodea_pos][0]
        node_b = self.heap[nodeb_pos][0]
        self.heap[nodea_pos], self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[node_a] = nodeb_pos
        self.position_map[node_b] = nodea_pos
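# A minimal queue usage sketch (illustrative values, assuming the class above):
#
#     pq = MinPriorityQueue[str]()
#     pq.push("a", 5); pq.push("b", 2); pq.push("c", 9)
#     pq.update_key("c", 1)
#     assert pq.extract_min() == "c"
#     assert pq.extract_min() == "b"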
class GraphUndirectedWeighted(Generic[T]):
    """An undirected weighted graph stored as nested adjacency dictionaries."""

    def __init__(self):
        self.connections = {}
        self.nodes = 0

    def __repr__(self):
        return str(self.connections)

    def __len__(self):
        return self.nodes

    def add_node(self, node):
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1, node2, weight):
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T]):
    """Build a spanning tree of `graph`, returning (dist, parent) dictionaries."""
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}

    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 623 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 0 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f'{test_file} instead.')
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.')
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.')
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the module corresponding to a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all classes in a test module whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Collect all test classes in a test module (those with a non-empty `all_model_classes`)."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in a test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate a test class and return its model tester class, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes in a test file that cover a given model class."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Collect the model tester classes associated with a given model class."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Map each test class in a test file to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in a test file to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in a test file to its model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
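# A minimal usage sketch (hypothetical test path; assumes running from a transformers checkout):
#
#     test_file = "tests/models/bert/test_modeling_bert.py"
#     for model_class, tester_classes in get_model_to_tester_mapping(test_file).items():
#         print(model_class.__name__, [t.__name__ for t in tester_classes])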
def to_json(o):
    """Make `o` JSON-serializable: classes become their names, containers recurse."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 667 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')

    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'

    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'Unexpected key {key} in state_dict')

    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print('Looks ok!')

    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving image processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f'caidas/{model_name}')
        processor.push_to_hub(f'caidas/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
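# Example invocation (assumed script filename; flags as defined above, adjust the URL for other checkpoints):
#
#     python convert_swin2sr_original_to_pytorch.py \
#         --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#         --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64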
| 4 | 0 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__snake_case : Dict = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fpaa_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
__lowerCAmelCase : int = tensor_name in module._buffers
__lowerCAmelCase : List[Any] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
__lowerCAmelCase : str = False
__lowerCAmelCase : str = False
if is_buffer or not is_bitsandbytes_available():
__lowerCAmelCase : int = False
__lowerCAmelCase : str = False
else:
__lowerCAmelCase : Union[str, Any] = hasattr(bnb.nn ,"Params4bit" ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
__lowerCAmelCase : List[Any] = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
__lowerCAmelCase : List[str] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__lowerCAmelCase : List[str] = old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,torch.Tensor ):
__lowerCAmelCase : Optional[int] = value.to("cpu" )
if value.dtype == torch.inta:
__lowerCAmelCase : List[Any] = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
__lowerCAmelCase : int = torch.tensor(_UpperCAmelCase ,device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_UpperCAmelCase ) and fpaa_statistics is None:
__lowerCAmelCase : Tuple = new_value.T
__lowerCAmelCase : int = old_value.__dict__
if is_abit:
__lowerCAmelCase : Any = bnb.nn.IntaParams(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
elif is_abit:
__lowerCAmelCase : Dict = bnb.nn.Paramsabit(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
__lowerCAmelCase : int = new_value
if fpaa_statistics is not None:
setattr(module.weight ,"SCB" ,fpaa_statistics.to(_UpperCAmelCase ) )
else:
if value is None:
__lowerCAmelCase : Optional[Any] = old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,torch.Tensor ):
__lowerCAmelCase : Optional[int] = value.to(_UpperCAmelCase )
else:
__lowerCAmelCase : Tuple = torch.tensor(_UpperCAmelCase ,device=_UpperCAmelCase )
if is_buffer:
__lowerCAmelCase : Dict = new_value
else:
__lowerCAmelCase : Optional[Any] = nn.Parameter(_UpperCAmelCase ,requires_grad=old_value.requires_grad )
__lowerCAmelCase : Optional[Any] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
if (isinstance(_UpperCAmelCase ,nn.Linear ) or isinstance(_UpperCAmelCase ,_UpperCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(_UpperCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
__lowerCAmelCase , __lowerCAmelCase : Any = module.weight.shape
else:
__lowerCAmelCase : Optional[int] = module.in_features
__lowerCAmelCase : List[Any] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
__lowerCAmelCase : int = bnb.nn.LinearabitLt(
_UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
__lowerCAmelCase : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
__lowerCAmelCase : Dict = bnb.nn.Linearabit(
_UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
__lowerCAmelCase : Tuple = True
# Store the module class in case we need to transpose the weight later
__lowerCAmelCase : str = type(_UpperCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_UpperCAmelCase )
if len(list(module.children() ) ) > 0:
__lowerCAmelCase , __lowerCAmelCase : int = _replace_with_bnb_linear(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,has_been_replaced=_UpperCAmelCase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
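# A minimal usage sketch (hypothetical model; `BitsAndBytesConfig` is the standard transformers
# quantization config, and a bitsandbytes-capable GPU setup is assumed):
#
#     from transformers import BitsAndBytesConfig
#     quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = replace_with_bnb_linear(model, quantization_config=quantization_config)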
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead", FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead", FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
__lowerCAmelCase : Dict = deepcopy(_UpperCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
__lowerCAmelCase : Optional[Any] = find_tied_parameters(_UpperCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
__lowerCAmelCase : Any = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
__lowerCAmelCase : Tuple = sum(_UpperCAmelCase ,[] )
__lowerCAmelCase : List[str] = len(_UpperCAmelCase ) > 0
# Check if it is a base model
__lowerCAmelCase : Optional[Any] = not hasattr(_UpperCAmelCase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__lowerCAmelCase : Any = list(model.named_children() )
__lowerCAmelCase : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
__lowerCAmelCase : List[Any] = set(_UpperCAmelCase ) - set(_UpperCAmelCase )
__lowerCAmelCase : Any = list(set(_UpperCAmelCase ) ) + list(_UpperCAmelCase )
# remove ".weight" from the keys
__lowerCAmelCase : Optional[Any] = [".weight", ".bias"]
__lowerCAmelCase : Dict = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__lowerCAmelCase : Any = name.replace(_UpperCAmelCase ,"" )
filtered_module_names.append(_UpperCAmelCase )
    return filtered_module_names
| 293 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__( self , vocab_size=29056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
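# A minimal usage sketch (configuration only; no weights are loaded):
#
#     config = MegatronBertConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4)
#     print(config.model_type, config.hidden_size)  # megatron-bert 64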
| 4 | 0 |
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """Search `x` in a sorted array by jumping in sqrt(n)-sized blocks, then scanning linearly."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
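# Jump search needs O(sqrt(n)) comparisons on a sorted array. A quick worked example
# (illustrative values, assuming the function above):
#
#     >>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55)
#     10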
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    x = int(input("""Enter the number to be searched:\n"""))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(F'Number {x} is at index {res}')
| 152 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
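    """
    Reverse the word order of a sentence (this doctest feeds `doctest.testmod()` below):

    >>> reverse_words("I love Python")
    'Python love I'
    """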
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 499 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 256 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.18_215 , norm_type = "group" , ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1)
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )

    @apply_forward_hook
    def encode(self , x , return_dict = True ):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self , h , force_not_quantize = False , return_dict = True ):
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self , sample , return_dict = True ):
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
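# A minimal usage sketch (random input; assumes the diffusers blocks imported above):
#
#     model = VQModel(block_out_channels=(32,), norm_num_groups=32, sample_size=32)
#     sample = torch.randn(1, 3, 32, 32)
#     reconstruction = model(sample).sample
#     print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])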
| 4 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted halves input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
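# A quick worked example (illustrative values, assuming the functions above):
#
#     >>> iter_merge_sort([4, 1, 3, 2])
#     [1, 2, 3, 4]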
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 651 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A graph defined by a set of vertices and a mapping from edges to weights."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int):
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to build a minimum spanning tree of this graph."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]
    for edgea in range(1, len(adjacency_matrix)):
        for edgeb in range(edgea):
            if adjacency_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjacency_matrix[edgea][edgeb])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 4 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, """words.txt""")

    words = """"""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip("""\"""") for word in words.strip("""\r\n""").split(""",""")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
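# Worked example of the letter-value arithmetic (illustrative):
# "SKY" -> 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the 10th triangular number,
# so "SKY" counts as a triangle word.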
if __name__ == "__main__":
print(solution())
| 412 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
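# A quick worked example (illustrative values): with forecasts [0.95, 1.0, 1.05] and an
# actual value of 1.0, 1.05 exceeds the actual (not_safe=1) while 0.95 and 1.0 land
# within the 0.1 tolerance (safe=2), so the vote passes:
#
#     >>> data_safety_checker([0.95, 1.0, 1.05], 1.0)
#     True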
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['total_user', 'total_even', 'days']
    )

    # start normalization
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = '' if data_safety_checker(res_vote, tst_user) else 'not '
    print(f"Today's data is {not_str}safe.")
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
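# Hedged note on the lazy-import pattern above: each try/except registers a name only when
# its optional backend (sentencepiece, tokenizers, torch) is importable, and _LazyModule
# defers the actual submodule import until attribute access, so importing the package is
# cheap even when every backend is installed.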
| 63 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
        '-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How many images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
lowerCAmelCase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
if not len(_UpperCAmelCase ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
lowerCAmelCase ,lowerCAmelCase = imgs[0].size
lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) )
lowerCAmelCase ,lowerCAmelCase = grid.size
for i, img in enumerate(_UpperCAmelCase ):
grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ):
lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) )
lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
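# Hedged layout check for image_grid/generate_images above: images paste row-major at
# (i % cols * w, i // cols * h) and _rows = int(sqrt(n)). For n=8 tiles of 64x64 pixels
# that means 2 rows by 8 // 2 = 4 cols, with image 5 landing at box (64, 64).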
__UpperCamelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id))
__UpperCamelCase : Optional[Any] = pipeline.to(unet.device)
__UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 4 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __UpperCamelCase ( a__ ):
"""simple docstring"""
lowerCAmelCase_ = '''dpt'''
def __init__( self : Any , _A : Dict=768 , _A : str=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : List[Any]="gelu" , _A : str=0.0 , _A : Union[str, Any]=0.0 , _A : List[Any]=0.02 , _A : str=1e-12 , _A : Union[str, Any]=384 , _A : List[str]=16 , _A : Optional[Any]=3 , _A : Tuple=False , _A : Any=True , _A : List[str]=[2, 5, 8, 11] , _A : str="project" , _A : Union[str, Any]=[4, 2, 1, 0.5] , _A : List[Any]=[96, 192, 384, 768] , _A : Any=256 , _A : Any=-1 , _A : Dict=False , _A : List[Any]=True , _A : Dict=0.4 , _A : Optional[int]=255 , _A : str=0.1 , _A : Dict=[1, 1024, 24, 24] , _A : str=[0, 1] , _A : Optional[int]=None , **_A : Optional[int] , ):
"""simple docstring"""
super().__init__(**_snake_case )
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Tuple = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
__SCREAMING_SNAKE_CASE : int = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
__SCREAMING_SNAKE_CASE : Dict = BitConfig(**_snake_case )
elif isinstance(_snake_case , _snake_case ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
__SCREAMING_SNAKE_CASE : Tuple = BitConfig(**_snake_case )
elif isinstance(_snake_case , _snake_case ):
__SCREAMING_SNAKE_CASE : Optional[int] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
__SCREAMING_SNAKE_CASE : List[str] = backbone_featmap_shape
__SCREAMING_SNAKE_CASE : str = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = intermediate_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Dict = patch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Optional[Any] = qkv_bias
__SCREAMING_SNAKE_CASE : List[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
__SCREAMING_SNAKE_CASE : int = readout_type
__SCREAMING_SNAKE_CASE : str = reassemble_factors
__SCREAMING_SNAKE_CASE : Union[str, Any] = neck_hidden_sizes
__SCREAMING_SNAKE_CASE : Optional[int] = fusion_hidden_size
__SCREAMING_SNAKE_CASE : Optional[int] = head_in_index
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE : str = use_auxiliary_head
__SCREAMING_SNAKE_CASE : str = auxiliary_loss_weight
__SCREAMING_SNAKE_CASE : List[Any] = semantic_loss_ignore_index
__SCREAMING_SNAKE_CASE : Any = semantic_classifier_dropout
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
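# Hedged usage sketch (upstream class name DPTConfig used for readability): constructing
# the config with is_hybrid=True and backbone_config=None falls back to the default BiT
# stem declared in __init__, and readout_type must stay "project" on the hybrid path.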
| 74 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : List[int] ):
lowerCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__UpperCamelCase : Optional[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCamelCase : int = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ):
try:
lowerCAmelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ):
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(_UpperCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
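# Hedged examples grounded in the maps above: a 12-layer teacher distilled into a 3-layer
# student copies teacher layers [0, 6, 11]; intermediate supervision for a 2-layer student
# of the same teacher uses teacher layers [5, 11].
assert LAYERS_TO_COPY[12][3] == [0, 6, 11]
assert LAYERS_TO_SUPERVISE[12][2] == [5, 11]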
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, PreTrainedModel] , _UpperCAmelCase : Union[str, Path] = "student" , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ):
lowerCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval()
else:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F'teacher must be a model or string got type {type(_UpperCAmelCase )}'
lowerCAmelCase = teacher.config.to_diff_dict()
try:
lowerCAmelCase ,lowerCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowerCAmelCase = teacher_e
if d is None:
lowerCAmelCase = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
lowerCAmelCase ,lowerCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
lowerCAmelCase ,lowerCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowerCAmelCase = teacher_e
if d is None:
lowerCAmelCase = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_UpperCAmelCase )
# Copy weights
lowerCAmelCase = teacher.config_class(**_UpperCAmelCase )
lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
lowerCAmelCase = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
lowerCAmelCase ,lowerCAmelCase = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(_UpperCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase )
if d_layers_to_copy is None:
lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase )
try:
if hasattr(
_UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
lowerCAmelCase = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(_UpperCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 4 | 0 |
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
return str(_UpperCAmelCase ) == str(_UpperCAmelCase )[::-1]
def _SCREAMING_SNAKE_CASE ( a ) -> List[str]:
return int(_UpperCAmelCase ) + int(str(_UpperCAmelCase )[::-1] )
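# Hedged worked example of one reverse-and-add step used below: 47 + 74 = 121, already a
# palindrome, so 47 is ruled out as a Lychrel candidate after a single iteration; 196 is
# the classic number that never seems to converge within the 50-step cap.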
def _SCREAMING_SNAKE_CASE ( a = 1_00_00 ) -> int:
__A : Optional[Any] = []
for num in range(1 , _UpperCAmelCase ):
__A : Optional[Any] = 0
__A : Dict = num
while iterations < 50:
__A : Tuple = sum_reverse(_UpperCAmelCase )
iterations += 1
if is_palindrome(_UpperCAmelCase ):
break
else:
lychrel_nums.append(_UpperCAmelCase )
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 239 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import os
def __magic_name__ ( __UpperCAmelCase = "input.txt" ) -> str:
'''simple docstring'''
with open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) as input_file:
__SCREAMING_SNAKE_CASE = [
[int(_UpperCAmelCase ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
__SCREAMING_SNAKE_CASE = len(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = len(matrix[0] )
__SCREAMING_SNAKE_CASE = [[-1 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
for i in range(_UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = matrix[i][0]
for j in range(1 , _UpperCAmelCase ):
for i in range(_UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
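# Hedged worked example of the three-pass column relaxation above, for [[1, 9], [5, 1]]:
# column 0 initializes to [1, 5]; column 1 enters from the left as [10, 6], and neither
# the downward nor the upward pass improves it, so the answer is min(10, 6) = 6.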
if __name__ == "__main__":
print(F'''{solution() = }''')
| 109 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ):
lowerCAmelCase = 0.00
lowerCAmelCase = 0
for resistor in resistors:
if resistor <= 0:
lowerCAmelCase = F'Resistor at index {index} has a negative or zero value!'
raise ValueError(_UpperCAmelCase )
first_sum += 1 / float(_UpperCAmelCase )
index += 1
return 1 / first_sum
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ):
lowerCAmelCase = 0.00
lowerCAmelCase = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowerCAmelCase = F'Resistor at index {index} has a negative value!'
raise ValueError(_UpperCAmelCase )
index += 1
return sum_r
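# Hedged sanity values for the two formulas above: resistors [2.0, 2.0] combine to
# 1 / (1/2 + 1/2) = 1.0 ohm in parallel and to 2.0 + 2.0 = 4.0 ohm in series.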
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
a_ : int = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
_A = 42
_A = None
_A = None
_A = None
_A = None
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase , lowerCamelCase = _str_to_version_tuple(self.version_str )
def __repr__(self ):
'''simple docstring'''
return F"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def _a (self ):
'''simple docstring'''
return self.major, self.minor, self.patch
def _a (self , __a ):
'''simple docstring'''
if isinstance(_snake_case , _snake_case ):
return Version(_snake_case )
elif isinstance(_snake_case , _snake_case ):
return other
raise TypeError(F"""{other} (type {type(_snake_case )}) cannot be compared to version.""" )
def __eq__(self , __a ):
'''simple docstring'''
try:
lowerCamelCase = self._validate_operand(_snake_case )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__(self , __a ):
'''simple docstring'''
lowerCamelCase = self._validate_operand(_snake_case )
return self.tuple < other.tuple
def __hash__(self ):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _a (cls , __a ):
'''simple docstring'''
lowerCamelCase = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _a (self ):
'''simple docstring'''
return self.version_str
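# Hedged round-trip example for the two module-level helpers defined below: parsing
# "1.2.3" through _VERSION_REG yields the tuple (1, 2, 3), and joining (1, 2, 3) with
# "." gives back "1.2.3".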
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = _VERSION_REG.match(_UpperCAmelCase )
if not res:
raise ValueError(F"""Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.""" )
return tuple(int(_UpperCAmelCase ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
return ".".join(str(_UpperCAmelCase ) for v in version_tuple ) | 623 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class a ( a__ ):
snake_case__ = '''glpn'''
def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = num_channels
lowerCAmelCase = num_encoder_blocks
lowerCAmelCase = depths
lowerCAmelCase = sr_ratios
lowerCAmelCase = hidden_sizes
lowerCAmelCase = patch_sizes
lowerCAmelCase = strides
lowerCAmelCase = mlp_ratios
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = drop_path_rate
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = decoder_hidden_size
lowerCAmelCase = max_depth
lowerCAmelCase = head_in_index
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Optional[int] = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
A_ : Dict = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
A_ : List[Any] = tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
A_ : Dict = tf_top_k_top_p_filtering(_snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
A_ : Optional[int] = output[output != -float("""inf""" )]
A_ : Tuple = tf.cast(
tf.where(tf.not_equal(_snake_case , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-12 )
tf.debugging.assert_equal(_snake_case , _snake_case )
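# Hedged mini-example of the filtering semantics exercised above: for logits
# [1.0, 2.0, 3.0, 4.0], top_k=2 keeps only the two largest entries, top_p then keeps the
# smallest sorted prefix whose softmax mass reaches p, and everything else is masked to
# -inf (subject to min_tokens_to_keep).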
@require_tf
class _lowerCAmelCase ( unittest.TestCase , a__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__SCREAMING_SNAKE_CASE : List[Any] = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def _a (self ):
A_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A_ : List[str] = 2
A_ : List[str] = 2
class _lowerCAmelCase ( tf.Module ):
def __init__(self , lowercase ):
super(_snake_case , self ).__init__()
A_ : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_snake_case , )
def _a (self , lowercase , lowercase ):
A_ : Any = self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
A_ : Tuple = [[2, 0], [102, 103]]
A_ : Optional[int] = [[1, 0], [1, 1]]
A_ : Union[str, Any] = DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_snake_case , _snake_case , signatures={"""serving_default""": dummy_model.serving} )
A_ : str = tf.saved_model.load(_snake_case ).signatures["""serving_default"""]
for batch_size in range(1 , len(_snake_case ) + 1 ):
A_ : Tuple = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
A_ : int = serving_func(**_snake_case )["""sequences"""]
A_ : Optional[int] = test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
def _a (self ):
A_ : Tuple = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A_ : Union[str, Any] = 1
A_ : int = 2
class _lowerCAmelCase ( tf.Module ):
def __init__(self , lowercase ):
super(_snake_case , self ).__init__()
A_ : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_snake_case , )
def _a (self , lowercase , lowercase ):
A_ : Dict = self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
A_ : List[str] = [[2], [102, 103]]
A_ : Any = [[1], [1, 1]]
A_ : List[Any] = DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_snake_case , _snake_case , signatures={"""serving_default""": dummy_model.serving} )
A_ : Dict = tf.saved_model.load(_snake_case ).signatures["""serving_default"""]
for input_row in range(len(_snake_case ) ):
A_ : Union[str, Any] = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
A_ : str = serving_func(**_snake_case )["""sequences"""]
A_ : List[str] = test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
@require_tensorflow_text
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=_snake_case )
class _lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self ):
super().__init__()
A_ : Optional[int] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_snake_case , """spiece.model""" ) , """rb""" ).read() )
A_ : str = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def _a (self , lowercase , *lowercase , **lowercase ):
A_ : Optional[int] = self.tokenizer.tokenize(_snake_case )
A_, A_ : Optional[Any] = text.pad_model_inputs(
_snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
A_ : int = self.model.generate(input_ids=_snake_case , attention_mask=_snake_case )
return self.tokenizer.detokenize(_snake_case )
A_ : List[str] = CompleteSentenceTransformer()
A_ : Tuple = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
A_ : Union[str, Any] = complete_model(_snake_case )
A_ : List[Any] = tf.keras.Model(_snake_case , _snake_case )
keras_model.save(_snake_case )
def _a (self ):
A_ : List[str] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
A_ : List[Any] = 14
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A_ : Tuple = """Hello, my dog is cute and"""
A_ : Any = tokenizer(_snake_case , return_tensors="""tf""" )
A_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A_ : Dict = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
A_ : Dict = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
A_ : str = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
A_ : Any = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _a (self ):
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A_ : int = """Hugging Face is a technology company based in New York and Paris."""
A_ : List[str] = bart_tokenizer(_snake_case , return_tensors="""tf""" ).input_ids
A_ : List[str] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A_ : Optional[int] = bart_model.generate(_snake_case ).numpy()
class _lowerCAmelCase ( a__ ):
def _a (self , lowercase , lowercase=None , **lowercase ):
return super().call(_snake_case , **_snake_case )
A_ : Optional[int] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A_ : List[str] = bart_model.generate(_snake_case , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_snake_case , _snake_case ) )
class _lowerCAmelCase ( bart_model.model.encoder.__class__ ):
def _a (self , lowercase , **lowercase ):
return super().call(_snake_case , **_snake_case )
A_ : str = FakeEncoder(bart_model.config , bart_model.model.shared )
A_ : Any = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
A_ : int = bart_model.generate(_snake_case ).numpy()
with self.assertRaises(_snake_case ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(_snake_case , foo="""bar""" )
| 667 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = range_bbox
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = t
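        # Hedged illustration of the swaps above: a raw box (10, 8, 4, 2) becomes the
        # legal (4, 2, 10, 8), enforcing x1 <= x2 and y1 <= y2 for every generated box.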
lowerCAmelCase = tf.convert_to_tensor(_snake_case )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModel(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForMaskedLM(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFLayoutLMForTokenClassification(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_snake_case )
lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = True
snake_case__ = 1_0
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFLayoutLMModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
# test the sequence output on [0, :3, :3]
lowerCAmelCase = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) )
# test the pooled output on [1, :3]
lowerCAmelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(
input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowerCAmelCase = outputs.loss
lowerCAmelCase = (2,)
self.assertEqual(loss.shape , _snake_case )
# test the shape of the logits
lowerCAmelCase = outputs.logits
lowerCAmelCase = (2, 2)
self.assertEqual(logits.shape , _snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(
input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
# test the shape of the logits
lowerCAmelCase = outputs.logits
lowerCAmelCase = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
# test the shape of the logits
lowerCAmelCase = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _snake_case )
self.assertEqual(outputs.end_logits.shape , _snake_case )
| 4 | 0 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _lowercase ( __snake_case ,__snake_case ) -> Union[str, Any]:
__lowerCAmelCase : Optional[Any] = k_size // 2
__lowerCAmelCase , __lowerCAmelCase : Any = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
__lowerCAmelCase : str = 1 / (2 * pi * sigma) * exp(-(square(_UpperCAmelCase ) + square(_UpperCAmelCase )) / (2 * square(_UpperCAmelCase )) )
return g
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = image.shape[0], image.shape[1]
# dst image height and width
__lowerCAmelCase : int = height - k_size + 1
__lowerCAmelCase : Union[str, Any] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
__lowerCAmelCase : Optional[int] = zeros((dst_height * dst_width, k_size * k_size) )
__lowerCAmelCase : List[Any] = 0
for i, j in product(range(_UpperCAmelCase ) ,range(_UpperCAmelCase ) ):
__lowerCAmelCase : List[Any] = ravel(image[i : i + k_size, j : j + k_size] )
__lowerCAmelCase : Tuple = window
row += 1
# turn the kernel into shape(k*k, 1)
__lowerCAmelCase : int = gen_gaussian_kernel(_UpperCAmelCase ,_UpperCAmelCase )
__lowerCAmelCase : Optional[Any] = ravel(_UpperCAmelCase )
# reshape and get the dst image
__lowerCAmelCase : List[Any] = dot(_UpperCAmelCase ,_UpperCAmelCase ).reshape(_UpperCAmelCase ,_UpperCAmelCase ).astype(_UpperCAmelCase )
return dst
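# Hedged shape check for the im2col step above: a 5x5 gray image with k_size=3 gives
# dst_height = dst_width = 3, an image_array of shape (9, 9), and a (3, 3) result after
# the dot product with the raveled 9-element Gaussian kernel.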
if __name__ == "__main__":
# read original image
__snake_case : Tuple = imread(R'../image_data/lena.jpg')
# turn image in gray scale value
__snake_case : int = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__snake_case : Optional[Any] = gaussian_filter(gray, 3, sigma=1)
__snake_case : Tuple = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
    waitKey()
| 293 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__UpperCamelCase : Union[str, Any] = '''examples/'''
__UpperCamelCase : str = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
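# Hedged example of how a (pattern, template) pair above is applied: for the "init" entry,
# "VERSION" in the template is first replaced with the target version, then re.sub rewrites
# '__version__ = "4.30.0.dev0"' into '__version__ = "4.31.0"' in place.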
__UpperCamelCase : List[str] = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__UpperCamelCase : Optional[int] = '''README.md'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ):
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern]
lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase )
lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ):
for folder, directories, fnames in os.walk(_UpperCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern='examples' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if not patch:
update_version_in_examples(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = '🤗 Transformers currently provides the following architectures'
lowerCAmelCase = '1. Want to contribute a new model?'
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.readlines()
# Find the start of the list.
lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
lowerCAmelCase = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ():
with open(REPLACE_FILES['init'] , 'r' ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase = REPLACE_PATTERNS['init'][0].search(_UpperCAmelCase ).groups()[0]
return packaging.version.parse(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple=False ):
lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
lowerCAmelCase = default_version.base_version
elif patch:
lowerCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
lowerCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
lowerCAmelCase = input(F'Which version are you releasing? [{default_version}]' )
if len(_UpperCAmelCase ) == 0:
lowerCAmelCase = default_version
print(F'Updating version to {version}.' )
global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = get_version()
lowerCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
lowerCAmelCase = input(F'Which version are we developing now? [{dev_version}]' )
if len(_UpperCAmelCase ) == 0:
lowerCAmelCase = dev_version
print(F'Updating version to {version}.' )
global_version_update(_UpperCAmelCase )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
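# Hedged walk-through of the two flows above: pre-release from "4.31.0.dev0" proposes
# "4.31.0", a --patch release from "4.30.0" proposes "4.30.1", and post-release then
# proposes the next dev version, "4.32.0.dev0".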
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__a: List[Any] = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: int = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Optional[int] = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Optional[int] = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
__a: Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
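# --- Editor's note: a hedged, generic sketch of the `_LazyModule` init pattern the file
# above follows. The `foo` module/class names are hypothetical; the utilities are the
# real ones from `transformers.utils`.
import sys
from transformers.utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_foo": ["FooConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # backend missing: simply don't register the torch-backed symbols
else:
    _import_structure["modeling_foo"] = ["FooModel"]

# At runtime the module object is swapped out so attribute access imports lazily.
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)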
| 152 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCamelCase : Optional[int] = pytest.mark.integration
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_snake_case ) for x in np.arange(30 ).tolist()]} )
return dset
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
lowerCAmelCase = dset.map(
lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case )
lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value = [(True, None)] * 30 # assignment (not a call): set the mocked streaming_bulk return value
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_snake_case )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = faiss.IndexFlat(5 )
lowerCAmelCase = FaissIndex(custom_index=_snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase = 'index.faiss'
lowerCAmelCase = F'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = Elasticsearch()
lowerCAmelCase = {'acknowledged': True}
lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case )
mocked_bulk.return_value = [(True, None)] * 3 # assignment (not a call): set the mocked streaming_bulk return value
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
# batched queries with timeout
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
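# --- Editor's note: a minimal sketch of the FaissIndex behaviour exercised above,
# assuming `faiss` and `datasets` are installed. `search` takes a single 1-D query;
# `search_batch` takes a 2-D batch.
import numpy as np
import faiss
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))  # five one-hot vectors
scores, indices = index.search(np.eye(5, dtype=np.float32)[2])
assert indices[0] == 2  # under inner product, each one-hot vector is its own nearest neighbour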
| 4 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( a__ ):
lowercase : List[Any] = (DDIMParallelScheduler,)
lowercase : Optional[Any] = (('eta', 0.0), ('num_inference_steps', 5_0))
def a_ ( self , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**_snake_case )
return config
def a_ ( self , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = self.scheduler_classes[0]
UpperCamelCase : Any = self.get_scheduler_config(**_snake_case )
UpperCamelCase : Any = scheduler_class(**_snake_case )
UpperCamelCase , UpperCamelCase : Dict = 10, 0.0
UpperCamelCase : Optional[Any] = self.dummy_model()
UpperCamelCase : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for t in scheduler.timesteps:
UpperCamelCase : Union[str, Any] = model(_snake_case , _snake_case )
UpperCamelCase : str = scheduler.step(_snake_case , _snake_case , _snake_case , _snake_case ).prev_sample
return sample
def a_ ( self ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def a_ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_snake_case )
UpperCamelCase : str = self.scheduler_classes[0]
UpperCamelCase : List[str] = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase : Tuple = scheduler_class(**_snake_case )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def a_ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def a_ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def a_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def a_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_snake_case )
def a_ ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_snake_case )
def a_ ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_snake_case )
def a_ ( self ):
self.check_over_configs(thresholding=_snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , )
def a_ ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_snake_case )
def a_ ( self ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_snake_case , num_inference_steps=_snake_case )
def a_ ( self ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_snake_case , eta=_snake_case )
def a_ ( self ):
UpperCamelCase : int = self.scheduler_classes[0]
UpperCamelCase : str = self.get_scheduler_config()
UpperCamelCase : Optional[int] = scheduler_class(**_snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def a_ ( self ):
UpperCamelCase : str = self.scheduler_classes[0]
UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
UpperCamelCase : Any = scheduler_class(**_snake_case )
UpperCamelCase , UpperCamelCase : List[str] = 10, 0.0
scheduler.set_timesteps(_snake_case )
UpperCamelCase : Union[str, Any] = self.dummy_model()
UpperCamelCase : Any = self.dummy_sample_deter
UpperCamelCase : Union[str, Any] = self.dummy_sample_deter + 0.1
UpperCamelCase : List[str] = self.dummy_sample_deter - 0.1
UpperCamelCase : Any = samplea.shape[0]
UpperCamelCase : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase : Union[str, Any] = torch.arange(_snake_case )[0:3, None].repeat(1 , _snake_case )
UpperCamelCase : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase : Union[str, Any] = scheduler.batch_step_no_noise(_snake_case , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _snake_case )
UpperCamelCase : int = torch.sum(torch.abs(_snake_case ) )
UpperCamelCase : Dict = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def a_ ( self ):
UpperCamelCase : Any = self.full_loop()
UpperCamelCase : Dict = torch.sum(torch.abs(_snake_case ) )
UpperCamelCase : List[str] = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.223967 ) < 1e-3
def a_ ( self ):
UpperCamelCase : List[Any] = self.full_loop(prediction_type="""v_prediction""" )
UpperCamelCase : Optional[Any] = torch.sum(torch.abs(_snake_case ) )
UpperCamelCase : Optional[Any] = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def a_ ( self ):
UpperCamelCase : str = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
UpperCamelCase : List[Any] = torch.sum(torch.abs(_snake_case ) )
UpperCamelCase : List[Any] = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def a_ ( self ):
UpperCamelCase : Tuple = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
UpperCamelCase : List[str] = torch.sum(torch.abs(_snake_case ) )
UpperCamelCase : Dict = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
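# --- Editor's note: a hedged sketch of the denoising loop the `full_loop` helper above
# implements, using the public diffusers API with a dummy epsilon-predictor in place of
# a real UNet.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear", clip_sample=True)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
model = lambda x, t: torch.zeros_like(x)  # stand-in for the trained noise model
for t in scheduler.timesteps:
    residual = model(sample, t)
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample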
| 499 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = IFInpaintingPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_dummy_components()
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
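# --- Editor's note: the device-aware seeding idiom from `get_dummy_inputs` above, shown
# standalone. MPS does not accept a device-bound Generator, hence the branch.
import torch

def seeded_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # seeds the global RNG and returns the default CPU Generator
    return torch.Generator(device=device).manual_seed(seed)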
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 651 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = self.vocab_size - 1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case )
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTLMHeadModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTDoubleHeadsModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = OpenAIGPTForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
) = config_and_inputs  # config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels
lowerCAmelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case__ = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
lowerCAmelCase = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case , )
lowerCAmelCase = inputs_dict['labels']
lowerCAmelCase = inputs_dict['labels']
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_snake_case , )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , n_embd=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = OpenAIGPTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_torch
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(_snake_case )
lowerCAmelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=_snake_case ) # the president is
lowerCAmelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCAmelCase = model.generate(_snake_case , do_sample=_snake_case )
self.assertListEqual(output_ids[0].tolist() , _snake_case )
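# --- Editor's note: a hedged sketch of the greedy-generation check above, with the
# tokenizer handling the id <-> text mapping (`do_sample=False` makes generation
# deterministic, matching the hard-coded expected ids).
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))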
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 412 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCamelCase : str = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' )
lowerCAmelCase = parser.parse_args()
logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>`
lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
lowerCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(F'Loading text from {args.file_path}' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
lowerCAmelCase = fp.readlines()
logger.info('Start encoding' )
logger.info(F'{len(_UpperCAmelCase )} examples to process.' )
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 1_0000
lowerCAmelCase = time.time()
for text in data:
lowerCAmelCase = F'{bos} {text.strip()} {sep}'
lowerCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
rslt.append(_UpperCAmelCase )
iter += 1
if iter % interval == 0:
lowerCAmelCase = time.time()
logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
lowerCAmelCase = time.time()
logger.info('Finished binarization' )
logger.info(F'{len(_UpperCAmelCase )} examples processed.' )
lowerCAmelCase = F'{args.dump_file}.{args.tokenizer_name}.pickle'
lowerCAmelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt]
else:
lowerCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'Dump to {dp_file}' )
with open(_UpperCAmelCase , 'wb' ) as handle:
pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
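# --- Editor's note: the dtype decision near the end of the script above, standalone:
# token ids fit in uint16 only while the vocabulary has fewer than 2**16 entries.
import numpy as np

def pack_token_ids(token_ids, vocab_size):
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return np.asarray(token_ids, dtype=dtype)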
| 4 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Dict = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class a ( a__ ):
"""simple docstring"""
a : Dict = 'markuplm'
def __init__( self : List[str] , __lowercase : List[Any]=30522 , __lowercase : Any=768 , __lowercase : Any=12 , __lowercase : Optional[Any]=12 , __lowercase : str=3072 , __lowercase : str="gelu" , __lowercase : Dict=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : List[Any]=512 , __lowercase : List[Any]=2 , __lowercase : str=0.02 , __lowercase : Any=1e-1_2 , __lowercase : int=0 , __lowercase : Dict=0 , __lowercase : int=2 , __lowercase : Any=256 , __lowercase : Optional[Any]=1024 , __lowercase : List[Any]=216 , __lowercase : Optional[int]=1001 , __lowercase : Dict=32 , __lowercase : Dict=50 , __lowercase : Optional[int]="absolute" , __lowercase : Dict=True , __lowercase : Optional[Any]=None , **__lowercase : Dict , ) -> Union[str, Any]:
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case , )
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : int = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Optional[Any] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = use_cache
__UpperCAmelCase : Union[str, Any] = classifier_dropout
# additional properties
__UpperCAmelCase : Union[str, Any] = max_depth
__UpperCAmelCase : Union[str, Any] = max_xpath_tag_unit_embeddings
__UpperCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__UpperCAmelCase : Optional[int] = tag_pad_id
__UpperCAmelCase : Dict = subs_pad_id
__UpperCAmelCase : str = xpath_unit_hidden_size
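# --- Editor's note: a hedged usage sketch of the config above; defaults mirror the
# __init__ signature (exact attribute values depend on the installed transformers version).
from transformers import MarkupLMConfig

config = MarkupLMConfig()
assert config.model_type == "markuplm"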
| 63 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a ( a__ ):
snake_case__ = '''bert'''
def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
class a ( a__ ):
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
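# --- Editor's note: a hedged sketch pairing the two classes above: build a config, then
# inspect the dynamic ONNX input axes declared by the OnnxConfig subclass (import path
# assumed to match the transformers version this file targets).
from transformers import BertConfig
from transformers.models.bert.configuration_bert import BertOnnxConfig

config = BertConfig()  # bert-base-uncased defaults
onnx_config = BertOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict over input_ids / attention_mask / token_type_ids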
| 4 | 0 |
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(_UpperCAmelCase ) * abs(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
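# --- Editor's note: the function above computes classical kinetic energy,
# E = 1/2 * m * v**2; a readable equivalent with a quick sanity check:
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)

assert kinetic_energy(10, 10) == 500.0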
| 74 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( a__ , unittest.TestCase ):
snake_case__ = DanceDiffusionPipeline
snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase = IPNDMScheduler()
lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = DanceDiffusionPipeline(**_snake_case )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs(_snake_case )
lowerCAmelCase = pipe(**_snake_case )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch_device
lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch_device
lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
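# --- Editor's note: a hedged usage sketch of the audio pipeline tested above; the
# parameters mirror the slow test (downloads the 150k-step Harmonai checkpoint).
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
audio = output.audios[0]  # numpy array of shape (channels, num_samples)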
| 4 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
return x + 2
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Any = 'x = 3'
__A : Dict = {}
__A : int = evaluate(_snake_case , {} , state=_snake_case )
assert result == 3
self.assertDictEqual(_snake_case , {'x': 3} )
__A : Any = 'x = y'
__A : Union[str, Any] = {'y': 5}
__A : Any = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_snake_case , {'x': 5, 'y': 5} )
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'y = add_two(x)'
__A : str = {'x': 3}
__A : Any = evaluate(_snake_case , {'add_two': add_two} , state=_snake_case )
assert result == 5
self.assertDictEqual(_snake_case , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
__A : Union[str, Any] = evaluate(_snake_case , {} , state=_snake_case )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'x = 3'
__A : Union[str, Any] = {}
__A : int = evaluate(_snake_case , {} , state=_snake_case )
assert result == 3
self.assertDictEqual(_snake_case , {'x': 3} )
def UpperCAmelCase_ ( self ):
__A : str = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
__A : Optional[Any] = {'x': 3}
__A : str = evaluate(_snake_case , {'add_two': add_two} , state=_snake_case )
self.assertDictEqual(_snake_case , {'x': 3, 'y': 5} )
self.assertDictEqual(_snake_case , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def UpperCAmelCase_ ( self ):
__A : str = 'x = 3\ny = 5'
__A : Union[str, Any] = {}
__A : Any = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_snake_case , {'x': 3, 'y': 5} )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = 'text = f\'This is x: {x}.\''
__A : List[str] = {'x': 3}
__A : Any = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_snake_case , {'x': 3, 'text': 'This is x: 3.'} )
def UpperCAmelCase_ ( self ):
__A : int = 'if x <= 3:\n y = 2\nelse:\n y = 5'
__A : Tuple = {'x': 3}
__A : List[str] = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_snake_case , {'x': 3, 'y': 2} )
__A : Optional[Any] = {'x': 8}
__A : Dict = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_snake_case , {'x': 8, 'y': 5} )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'test_list = [x, add_two(x)]'
__A : Dict = {'x': 3}
__A : List[str] = evaluate(_snake_case , {'add_two': add_two} , state=_snake_case )
self.assertListEqual(_snake_case , [3, 5] )
self.assertDictEqual(_snake_case , {'x': 3, 'test_list': [3, 5]} )
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'y = x'
__A : Optional[int] = {'x': 3}
__A : List[Any] = evaluate(_snake_case , {} , state=_snake_case )
assert result == 3
self.assertDictEqual(_snake_case , {'x': 3, 'y': 3} )
def UpperCAmelCase_ ( self ):
__A : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
__A : Dict = {'x': 3}
__A : int = evaluate(_snake_case , {'add_two': add_two} , state=_snake_case )
assert result == 5
self.assertDictEqual(_snake_case , {'x': 3, 'test_list': [3, 5]} )
__A : Dict = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
__A : List[str] = {'x': 3}
__A : int = evaluate(_snake_case , {'add_two': add_two} , state=_snake_case )
assert result == 5
self.assertDictEqual(_snake_case , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'x = 0\nfor i in range(3):\n x = i'
__A : Dict = {}
__A : Tuple = evaluate(_snake_case , {'range': range} , state=_snake_case )
assert result == 2
self.assertDictEqual(_snake_case , {'x': 2, 'i': 2} )
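# --- Editor's note: a hedged sketch of `evaluate` outside the test harness; it returns
# the value of the last statement and mutates `state` in place.
from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
assert result == 5 and state == {"x": 3, "y": 5}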
| 239 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
) = config_and_inputs  # config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
snake_case__ = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = OpenLlamaModel(_snake_case )
original_model.to(_snake_case )
original_model.eval()
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase = OpenLlamaModel(_snake_case )
scaled_model.to(_snake_case )
scaled_model.eval()
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
| 4 | 0 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
a = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class __a ( a__ ):
def __init__( self : Optional[int] ,*lowerCamelCase : Tuple ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
super().__init__(*_snake_case ,**_snake_case )
requires_backends(self ,"""decord""" )
self.check_model_type(_snake_case )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Tuple=None ,lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if frame_sampling_rate is not None:
__SCREAMING_SNAKE_CASE = frame_sampling_rate
if num_frames is not None:
__SCREAMING_SNAKE_CASE = num_frames
__SCREAMING_SNAKE_CASE = {}
if top_k is not None:
__SCREAMING_SNAKE_CASE = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] ,lowerCamelCase : Union[str, Any] ,**lowerCamelCase : Dict ):
'''simple docstring'''
return super().__call__(_snake_case ,**_snake_case )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Dict ,lowerCamelCase : int=None ,lowerCamelCase : Tuple=1 ):
'''simple docstring'''
if num_frames is None:
__SCREAMING_SNAKE_CASE = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__SCREAMING_SNAKE_CASE = BytesIO(requests.get(_snake_case ).content )
__SCREAMING_SNAKE_CASE = VideoReader(_snake_case )
videoreader.seek(0 )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = num_frames * frame_sampling_rate - 1
__SCREAMING_SNAKE_CASE = np.linspace(_snake_case ,_snake_case ,num=_snake_case ,dtype=np.intaa )
__SCREAMING_SNAKE_CASE = videoreader.get_batch(_snake_case ).asnumpy()
__SCREAMING_SNAKE_CASE = list(_snake_case )
__SCREAMING_SNAKE_CASE = self.image_processor(_snake_case ,return_tensors=self.framework )
return model_inputs
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model(**_snake_case )
return model_outputs
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : List[str]=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
__SCREAMING_SNAKE_CASE = self.model.config.num_labels
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = model_outputs.logits.softmax(-1 )[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = probs.topk(_snake_case )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = scores.tolist()
__SCREAMING_SNAKE_CASE = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case ,_snake_case )]
| 109 |
"""simple docstring"""
from typing import Any
class a :
def __init__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = data
lowerCAmelCase = None
def __repr__( self ):
"""simple docstring"""
return F'Node({self.data})'
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase = self.head
while node:
yield node.data
lowerCAmelCase = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(_snake_case ) for item in self] )
def __getitem__( self , _snake_case ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , _snake_case , _snake_case ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
lowerCAmelCase = self.head
for _ in range(_snake_case ):
lowerCAmelCase = current.next
lowerCAmelCase = data
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
self.insert_nth(len(self ) , _snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
self.insert_nth(0 , _snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
lowerCAmelCase = Node(_snake_case )
if self.head is None:
lowerCAmelCase = new_node
elif index == 0:
lowerCAmelCase = self.head # link new_node to head
lowerCAmelCase = new_node
else:
lowerCAmelCase = self.head
for _ in range(index - 1 ):
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next
lowerCAmelCase = new_node
def UpperCamelCase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def UpperCamelCase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase__ ( self , _snake_case = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
lowerCAmelCase = self.head # default first node
if index == 0:
lowerCAmelCase = self.head.next
else:
lowerCAmelCase = self.head
for _ in range(index - 1 ):
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next.next
return delete_node.data
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.head is None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = None
lowerCAmelCase = self.head
while current:
# Store the current node's next node.
lowerCAmelCase = current.next
# Make the current node's next point backwards
lowerCAmelCase = prev
# Make the previous node be the current node
lowerCAmelCase = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase = next_node
# Return prev in order to put the head at the end
lowerCAmelCase = prev
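    # Note (added): `reverse` re-points each node's `next` at its predecessor in
    # a single pass using O(1) extra space, so 1->2->3 becomes 3->2->1 with the
    # old tail (`prev` after the loop) installed as the new head.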
def _SCREAMING_SNAKE_CASE ():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def _SCREAMING_SNAKE_CASE ():
    test_input = [
-9,
100,
Node(7734_5112 ),
'dlrow olleH',
7,
5555,
0,
-192.5_5555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def _SCREAMING_SNAKE_CASE ():
from doctest import testmod
testmod()
    linked_list = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
    print(linked_list )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(F'length of linked_list is : {len(linked_list )}' )
if __name__ == "__main__":
main()
| 4 | 0 |
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCamelCase__ ( a__):
"""simple docstring"""
_A = 'microsoft/speecht5_tts'
_A = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
_A = 'text_reader'
_A = SpeechTaProcessor
_A = SpeechTaForTextToSpeech
_A = SpeechTaHifiGan
_A = ['text']
_A = ['audio']
def _a (self ):
'''simple docstring'''
if self.post_processor is None:
lowerCamelCase = "microsoft/speecht5_hifigan"
super().setup()
def _a (self , __a , __a=None ):
'''simple docstring'''
lowerCamelCase = self.pre_processor(text=_snake_case , return_tensors="pt" , truncation=_snake_case )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowerCamelCase = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
lowerCamelCase = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _a (self , __a ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**_snake_case )
def _a (self , __a ):
'''simple docstring'''
with torch.no_grad():
            return self.post_processor(_snake_case ).cpu().detach()
| 623 |
"""simple docstring"""
from __future__ import annotations
import requests
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(_UpperCAmelCase ).json()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ):
lowerCAmelCase = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
lowerCAmelCase = requests.get(_UpperCAmelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ):
    stories = hackernews_top_stories(_UpperCAmelCase )
    return "\n".join('* [{title}]({url})'.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
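# Example of the generated markdown (illustrative only; real output depends on
# the live Hacker News API at request time, and these URLs are placeholders):
#
#   * [Example story one](https://example.com/one)
#   * [Example story two](https://example.com/two)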
| 4 | 0 |
'''simple docstring'''
import operator as op
def a ( post_fix ):
    '''simple docstring'''
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        """^""": op.pow,
        """*""": op.mul,
        """/""": div,
        """+""": op.add,
        """-""": op.sub,
    }  # operators & their respective operation
    # print table header
    print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
    print("""-""" * (30 + len(post_fix )) )
for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ , )
return int(stack[0] )
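# Worked example (added for illustration; note that ``solve`` in the __main__
# block below refers to the function defined above as ``a``). Evaluating
# "5 6 9 * +" proceeds as:
#   5  -> push(5)                      stack: 5
#   6  -> push(6)                      stack: 5,6
#   9  -> push(9)                      stack: 5,6,9
#   *  -> pop 9, pop 6, push(6 * 9)    stack: 5,54
#   +  -> pop 54, pop 5, push(5 + 54)  stack: 59
# so the function returns 59.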
if __name__ == "__main__":
    Postfix = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
    print('''\n\tResult = ''', solve(Postfix))
| 667 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
lowerCAmelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 48
lowerCAmelCase = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 60
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase = 1
lowerCAmelCase = 1
lowerCAmelCase = 126
lowerCAmelCase = 7
lowerCAmelCase = 255.0
lowerCAmelCase = ''
return config
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
lowerCAmelCase = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
lowerCAmelCase = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
lowerCAmelCase = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
lowerCAmelCase = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
lowerCAmelCase = 'layernorm.weight'
if name == "norm.bias":
lowerCAmelCase = 'layernorm.bias'
if "conv_first" in name:
lowerCAmelCase = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' )
lowerCAmelCase = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
lowerCAmelCase = 'swin2sr.' + name
return name
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
lowerCAmelCase = key.split('.' )
lowerCAmelCase = int(key_split[1] )
lowerCAmelCase = int(key_split[4] )
lowerCAmelCase = config.embed_dim
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[dim : dim * 2, :]
lowerCAmelCase = val[-dim:, :]
else:
lowerCAmelCase = val[:dim]
lowerCAmelCase = val[dim : dim * 2]
lowerCAmelCase = val[-dim:]
pass
else:
lowerCAmelCase = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ):
lowerCAmelCase = get_config(_UpperCAmelCase )
lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCAmelCase )
model.eval()
lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' )
lowerCAmelCase = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_UpperCAmelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
lowerCAmelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
lowerCAmelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCAmelCase = 126 if 'Jpeg' in checkpoint_url else 256
lowerCAmelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowerCAmelCase = transforms(_UpperCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCAmelCase = model(_UpperCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 512, 512] )
lowerCAmelCase = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 512, 512] )
lowerCAmelCase = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-3 )
print('Looks ok!' )
lowerCAmelCase = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
lowerCAmelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
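# Example invocation (added sketch; the script name and dump folder below are
# placeholders, while the checkpoint URL is one of the real URLs listed above):
#
#   python convert_swin2sr.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64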
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Optional[int]:
if a == 0:
raise ValueError("Coefficient \'a\' must not be zero." )
__lowerCAmelCase : Union[str, Any] = b * b - 4 * a * c
__lowerCAmelCase : Union[str, Any] = (-b + sqrt(_UpperCAmelCase )) / (2 * a)
__lowerCAmelCase : Optional[int] = (-b - sqrt(_UpperCAmelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
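# Worked example (added): for 5x^2 + 6x + 1 the discriminant is 6 * 6 - 4 * 5 * 1 = 16,
# so the roots are (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0 -- exactly the
# values exercised by the demo below (``quadratic_roots`` is the upstream name
# of the first function above).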
def _lowercase ( ) -> int:
    solution1 , solution2 = quadratic_roots(a=5 ,b=6 ,c=1 )
    print(F"""The solutions are: {solution1} and {solution2}""" )
if __name__ == "__main__":
    main()
| 293 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class a ( a__ ):
snake_case__ = '''megatron-bert'''
def __init__( self , _snake_case=2_90_56 , _snake_case=10_24 , _snake_case=24 , _snake_case=16 , _snake_case=40_96 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
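# Usage sketch (added comment; upstream this class is transformers'
# MegatronBertConfig, so the ordinary entry point looks like):
#
#   from transformers import MegatronBertConfig, MegatronBertModel
#   config = MegatronBertConfig(num_hidden_layers=8)
#   model = MegatronBertModel(config)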
| 4 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__a: Any = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
__a: str = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
__a: Union[str, Any] = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Dict = 0.0
for i, j in zip(_snake_case , _snake_case ):
n_correct += 1.0 if math_equivalence.is_equiv(_snake_case , _snake_case ) else 0.0
lowercase__ : Union[str, Any] = n_correct / len(_snake_case )
return {
"accuracy": accuracy,
}
| 152 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
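    """
    Return the words of ``input_str`` in reverse order. (Docstring added so the
    ``doctest.testmod()`` call below has an example to exercise.)

    >>> _SCREAMING_SNAKE_CASE("I love Python")
    'Python love I'
    """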
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase ( unittest.TestCase ):
def a_ ( self ):
UpperCamelCase : str = torch.nn.Linear(10 , 10 )
UpperCamelCase : Tuple = torch.optim.SGD(model.parameters() , 0.1 )
UpperCamelCase : Dict = Accelerator()
UpperCamelCase : Optional[Any] = accelerator.prepare(_snake_case )
try:
pickle.loads(pickle.dumps(_snake_case ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 499 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a ( a__ ):
snake_case__ = 42
class a ( a__ , a__ ):
@register_to_config
def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ):
"""simple docstring"""
super().__init__()
# pass init params to Encoder
lowerCAmelCase = Encoder(
in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , )
lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 )
lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case )
lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 )
# pass init params to Decoder
lowerCAmelCase = Decoder(
in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , )
@apply_forward_hook
def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
"""simple docstring"""
lowerCAmelCase = self.encoder(_snake_case )
lowerCAmelCase = self.quant_conv(_snake_case )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_snake_case )
@apply_forward_hook
def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ):
"""simple docstring"""
if not force_not_quantize:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case )
else:
lowerCAmelCase = h
lowerCAmelCase = self.post_quant_conv(_snake_case )
lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
"""simple docstring"""
lowerCAmelCase = sample
lowerCAmelCase = self.encode(_snake_case ).latents
lowerCAmelCase = self.decode(_snake_case ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_snake_case )
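# Shape sketch (added; upstream this class is diffusers' VQModel):
#
#   from diffusers import VQModel
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   model(x).sample.shape  # torch.Size([1, 3, 32, 32]) -- reconstruction matches the input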
| 4 | 0 |
from collections import defaultdict
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
__lowerCAmelCase : Any = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(_snake_case ) )
]
__lowerCAmelCase : Tuple = defaultdict(_snake_case ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
__lowerCAmelCase : List[str] = (1 << len(_snake_case )) - 1
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
__lowerCAmelCase : List[Any] = self.count_ways_until(_snake_case , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
__lowerCAmelCase : List[str] = total_ways_util
return self.dp[mask][task_no]
def SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for i in range(len(_snake_case ) ):
for j in task_performed[i]:
self.task[j].append(_snake_case )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__UpperCAmelCase = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__UpperCAmelCase = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
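# Added note: for the sample above the count is 10 -- person 0 may take task 1,
# 3 or 4, person 1 task 1, 2 or 5, person 2 task 3 or 4, and exactly ten
# combinations give every person a distinct task.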
| 651 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
__UpperCamelCase : Optional[Any] = tuple[int, int]
class a :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = vertices
lowerCAmelCase = {
(min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items()
}
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase = weight
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Graph({min(self.vertices )} , {} )
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase = edge
lowerCAmelCase = weight
subgraph.add_edge(_snake_case , _snake_case )
return subgraph
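    # Illustration (added): for a triangle with edges {(0, 1): 1, (1, 2): 2,
    # (0, 2): 3} the method above (invoked as ``prims_algorithm`` in the solver
    # below) grows the tree from vertex 0, picking (0, 1) and then (1, 2) for a
    # minimum spanning weight of 3.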
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ):
lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = {}
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
with open(_UpperCAmelCase ) as f:
lowerCAmelCase = f.read().strip().split('\n' )
lowerCAmelCase = [line.split(',' ) for line in data]
for edgea in range(1 , len(_UpperCAmelCase ) ):
for edgea in range(_UpperCAmelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] )
lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase )
lowerCAmelCase = graph.prims_algorithm()
lowerCAmelCase = sum(graph.edges.values() )
lowerCAmelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 4 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( a__ ):
__SCREAMING_SNAKE_CASE : int = ['pixel_values']
def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] = True , UpperCamelCase__ : Any = 32 , UpperCamelCase__ : str=PILImageResampling.BILINEAR , UpperCamelCase__ : List[Any] = True , **UpperCamelCase__ : Tuple , ):
'''simple docstring'''
lowercase_ = do_resize
lowercase_ = do_rescale
lowercase_ = size_divisor
lowercase_ = resample
super().__init__(**_snake_case )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] = None , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = get_image_size(_snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
lowercase_ = height // size_divisor * size_divisor
lowercase_ = width // size_divisor * size_divisor
lowercase_ = resize(_snake_case , (new_h, new_w) , resample=_snake_case , data_format=_snake_case , **_snake_case )
return image
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
return rescale(image=_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] = None , UpperCamelCase__ : Any = None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : Any = None , UpperCamelCase__ : List[str] = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = size_divisor if size_divisor is not None else self.size_divisor
lowercase_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
lowercase_ = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
        lowercase_ = [to_numpy_array(img ) for img in images]
if do_resize:
            lowercase_ = [self.resize(image , size_divisor=_snake_case , resample=_snake_case ) for image in images]
        if do_rescale:
            lowercase_ = [self.rescale(image , scale=1 / 255 ) for image in images]
        lowercase_ = [to_channel_dimension_format(image , _snake_case ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
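# Usage sketch (added; upstream this class is transformers' GLPNImageProcessor,
# which rounds height and width down to multiples of ``size_divisor``):
#
#   processor = GLPNImageProcessor(size_divisor=32)
#   out = processor(images=image, return_tensors="np")  # a 65x97 input becomes 64x96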
| 412 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] )
lowerCAmelCase = np.array(_UpperCAmelCase )
lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = (1, 2, 1)
lowerCAmelCase = (1, 1, 0, 7)
lowerCAmelCase = SARIMAX(
_UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase )
lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' )
lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] )
return result[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = regressor.predict(_UpperCAmelCase )
return y_pred[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ):
train_user.sort()
lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 )
lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 )
lowerCAmelCase = qa - qa
lowerCAmelCase = qa - (iqr * 0.1)
return low_lim
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ):
lowerCAmelCase = 0
lowerCAmelCase = 0
for i in list_vote:
if i > actual_result:
lowerCAmelCase = not_safe + 1
else:
if abs(abs(_UpperCAmelCase ) - abs(_UpperCAmelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
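# Example (added): with forecasts [0.3, 0.5, 0.2] and an actual value of 0.35,
# 0.3 lands within the 0.1 tolerance (safe) while 0.5 overshoots and 0.2 is off
# by 0.15 (both unsafe), so the checker returns False.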
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__UpperCamelCase : Any = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
__UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values)
# split data
__UpperCamelCase : Dict = normalize_df[:, 2].tolist()
__UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist()
__UpperCamelCase : List[str] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist()
__UpperCamelCase : Tuple = x[: len(x) - 1]
__UpperCamelCase : Any = x[len(x) - 1 :]
# for linear regression & sarimax
__UpperCamelCase : str = total_date[: len(total_date) - 1]
__UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1]
__UpperCamelCase : List[Any] = total_match[: len(total_match) - 1]
__UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :]
__UpperCamelCase : str = total_user[len(total_user) - 1 :]
__UpperCamelCase : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
__UpperCamelCase : Any = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__UpperCamelCase : List[str] = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
print(f'''Today\'s data is {not_str}safe.''')
| 4 | 0 |
from __future__ import annotations
def lowerCamelCase__ ( __lowerCamelCase : list[int] ):
if not nums:
return 0
__UpperCAmelCase : Any = nums[0]
__UpperCAmelCase : int = 0
for num in nums[1:]:
__UpperCAmelCase , __UpperCAmelCase : Any = (
max_excluding + num,
max(_UpperCAmelCase , _UpperCAmelCase ),
)
return max(_UpperCAmelCase , _UpperCAmelCase )
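# Worked example (added): for [1, 2, 3] the best non-adjacent picks are 1 and 3,
# so the function returns 4:
#
#   >>> lowerCamelCase__([1, 2, 3])
#   4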
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
lowerCAmelCase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
if not len(_UpperCAmelCase ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
lowerCAmelCase ,lowerCAmelCase = imgs[0].size
lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) )
lowerCAmelCase ,lowerCAmelCase = grid.size
for i, img in enumerate(_UpperCAmelCase ):
grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ):
lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) )
lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__UpperCamelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id))
__UpperCamelCase : Optional[Any] = pipeline.to(unet.device)
__UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 4 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = DiTPipeline
lowerCAmelCase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
lowerCAmelCase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCAmelCase_ = False
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_snake_case , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_snake_case , )
__SCREAMING_SNAKE_CASE : Dict = AutoencoderKL()
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler()
__SCREAMING_SNAKE_CASE : str = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def UpperCAmelCase__ ( self : str , _A : int , _A : Union[str, Any]=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(_snake_case )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__SCREAMING_SNAKE_CASE : Any = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''cpu'''
__SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(_snake_case )
__SCREAMING_SNAKE_CASE : int = pipe(**_snake_case ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
__SCREAMING_SNAKE_CASE : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_snake_case , 1e-3 )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_snake_case , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
__SCREAMING_SNAKE_CASE : List[Any] = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.get_label_ids(_snake_case )
__SCREAMING_SNAKE_CASE : int = pipe(_snake_case , generator=_snake_case , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_snake_case , _snake_case ):
__SCREAMING_SNAKE_CASE : int = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
__SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
__SCREAMING_SNAKE_CASE : str = ['''vase''', '''umbrella''']
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe.get_label_ids(_snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(_snake_case , generator=_snake_case , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_snake_case , _snake_case ):
__SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 74 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
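# Illustrative usage (a sketch; `teacher` and `student` are hypothetical models):
#   copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, [0, 6, 11])
# copies teacher encoder layers 0, 6 and 11 into the student's three encoder slots.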
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
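# Example lookups (values follow directly from the LAYERS_TO_COPY table above):
#   pick_layers_to_copy(n_student=3, n_teacher=12)  -> [0, 6, 11]
#   pick_layers_to_copy(n_student=6, n_teacher=6)   -> [0, 1, 2, 3, 4, 5]
#   pick_layers_to_copy(n_student=5, n_teacher=12)  -> [0, 1, 2, 3, 4]  (KeyError fallback, with a warning)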
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
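# Example lookups (values follow from LAYERS_TO_SUPERVISE and the special cases above):
#   get_layers_to_supervise(n_student=1, n_teacher=12) -> [11]        (supervise with the last teacher layer)
#   get_layers_to_supervise(n_student=3, n_teacher=12) -> [3, 7, 11]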
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers: bool = False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict: this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save.
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For T5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
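# Example CLI invocation via python-fire (a sketch; the script name, teacher model
# and output directory are illustrative):
#   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3
# creates a 12-encoder-layer / 3-decoder-layer student initialized from the teacher.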
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc: list) -> list:
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite: bool = False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase : int = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
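# Example invocation (a sketch; the script path utils/check_doc_toc.py is assumed):
#   python utils/check_doc_toc.py                      # fail if the model ToC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite docs/source/en/_toctree.yml in place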
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    >>> resistor_parallel([2, 2])
    1.0
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn

    >>> resistor_series([5, 10, 15])
    30.0
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
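# Minimal sketch (illustrative, not part of the original module): the two helpers
# compose for mixed networks, e.g. a 2 ohm resistor in series with two 3 ohm
# resistors in parallel:
#   resistor_series([2, resistor_parallel([3, 3])])  # -> 2 + 1.5 = 3.5 ohms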
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
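# Usage sketch (illustrative, not part of the original module):
#   config = BertConfig()                 # defaults: 12 layers, hidden size 768, vocab 30522
#   onnx_config = BertOnnxConfig(config)
#   print(onnx_config.inputs)             # OrderedDict of dynamic axes per model input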
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluates how similar `item` is to the target by counting matching positions."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slices both parents at a random point and swaps the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
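# Illustrative example (hypothetical strings): with parents "abcd" and "wxyz" and a
# slice point of 2, crossover returns ("abyz", "wxcd") -- each child keeps one
# parent's prefix and the other parent's suffix.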
def mutate(child: str, genes: list[str]) -> str:
    """Randomly replaces one gene of the child with a random gene from the pool."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Picks second parents and generates mutated children for the next generation."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Runs the evolution until the target string is reproduced exactly."""
    # Verify that N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
lowerCamelCase :List[Any] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
lowerCamelCase :str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
lowerCamelCase :Any = basic(target_str, genes_list)
print(
F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
) | 667 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers.models.layoutlm.modeling_tf_layoutlm import (
        TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFLayoutLMForMaskedLM,
        TFLayoutLMForQuestionAnswering,
        TFLayoutLMForSequenceClassification,
        TFLayoutLMForTokenClassification,
        TFLayoutLMModel,
    )
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1] * 25, [1] * 25])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0] * 25, [0] * 25])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    """First method: linear regression via the normal equation."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
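# The `beta` line above is the closed-form least-squares fit (normal equation):
#   beta = (X^T X)^(-1) X^T y
# where each row of X is [1, date, match_count] and y holds the observed user counts.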
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX -- an ARIMA model with seasonality and exogenous regressors."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regressor with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Returns a lower limit derived from the interquartile range of the data."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Compares the forecasts against the actual result and votes on data safety."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
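# Illustrative example (hypothetical numbers): with normalized forecasts
# [0.95, 0.98, 2.0] and an actual value of 1.0, two votes fall within the 0.1
# tolerance and one overshoots, so data_safety_checker returns True (safe).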
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    # start normalization
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
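# Usage sketch (illustrative; the script path utils/release.py is assumed):
#   python utils/release.py                 # prepare a minor release
#   python utils/release.py --patch         # prepare a patch release
#   python utils/release.py --post_release  # bump to the next .dev0 version afterwards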
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = 'index.faiss'
    path = f'mock://{index_name}'
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
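# A minimal standalone sketch of the Windows-safe temp-file pattern used in the tests
# above (delete=False plus a manual os.unlink); `roundtrip_bytes` is a hypothetical
# helper name, not part of the test suite.
def roundtrip_bytes(payload: bytes) -> bytes:
    import os
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
        tmp_file.write(payload)
    # the file is closed here, so reopening it also works on Windows
    with open(tmp_file.name, 'rb') as f:
        data = f.read()
    os.unlink(tmp_file.name)  # manual cleanup replaces delete=True
    return data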
@require_elasticsearch
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
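# Hedged illustration of the raw faiss API that the FaissIndex wrapper exercised above
# builds on; a sketch assuming faiss-cpu and numpy are installed
# (`demo_inner_product_search` is a hypothetical name, not part of the test suite).
def demo_inner_product_search():
    import faiss
    index = faiss.IndexFlatIP(5)  # inner-product metric, dimension 5
    index.add(np.eye(5, dtype=np.float32))  # five one-hot vectors
    scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 1)
    return scores[0, 0], ids[0, 0]  # each stored vector scores 1.0; id 0 is returned first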
| 4 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def a_ ( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
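# Hedged usage sketch of the greedy decoding checked above (assumptions: network access
# for the "openai-gpt" checkpoint; the tokenizer class name is from the same model family):
#
#   from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#   tok = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#   ids = tok("the president is", return_tensors="pt").input_ids
#   out = model.generate(ids, do_sample=False)  # greedy, hence the reproducible token list
#   print(tok.decode(out[0]))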
| 499 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self):
        """simple docstring"""
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rag'] = [
        'RagModel',
        'RagPreTrainedModel',
        'RagSequenceForGeneration',
        'RagTokenForGeneration',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rag'] = [
        'TFRagModel',
        'TFRagPreTrainedModel',
        'TFRagSequenceForGeneration',
        'TFRagTokenForGeneration',
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
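# Hedged usage note for the _LazyModule pattern above: submodules are only imported on
# first attribute access, so importing a name stays cheap until it is actually used
# (checkpoint name below is illustrative):
#
#   from transformers import RagTokenizer          # no torch/tf import happens yet
#   tok = RagTokenizer.from_pretrained("facebook/rag-token-base")  # triggers the real import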
| 651 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        """simple docstring"""
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 4 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
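# Hedged standalone sketch of the atol-based slice comparison used above:
#
#   import jax.numpy as jnp
#   a = jnp.array([1.0000, 2.0000])
#   b = jnp.array([1.00004, 2.00006])
#   assert jnp.allclose(a, b, atol=1e-4)  # passes: elementwise |a - b| <= 1e-4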
| 412 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).')
    parser.add_argument('--file_path', type=str, default='data/dump.txt', help='The path to the data.')
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'])
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased', help='The tokenizer to use.')
    parser.add_argument('--dump_file', type=str, default='data/dump', help='The dump file prefix.')
    args = parser.parse_args()
    logger.info(f'Loading Tokenizer ({args.tokenizer_name})')
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`
    logger.info(f'Loading text from {args.file_path}')
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()
    logger.info('Start encoding')
    logger.info(f'{len(data)} examples to process.')
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl')
            start = time.time()
    logger.info('Finished binarization')
    logger.info(f'{len(data)} examples processed.')
    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'Dump to {dp_file}')
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
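# Hedged companion snippet for reading a dump back (the path assumes the default
# --dump_file and --tokenizer_name values of the script above):
#
#   import pickle
#   with open('data/dump.bert-base-uncased.pickle', 'rb') as f:
#       sequences = pickle.load(f)  # list of np.uint16 / np.int32 token-id arrays
#   print(len(sequences), sequences[0][:10])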
| 4 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
"""simple docstring"""
    model_input_names = ['input_features', 'is_longer']
def __init__( self : Dict , __lowercase : int=64 , __lowercase : str=48000 , __lowercase : Optional[Any]=480 , __lowercase : int=10 , __lowercase : Tuple=1024 , __lowercase : List[Any]=0.0 , __lowercase : Dict=False , __lowercase : Any = 0 , __lowercase : int = 14000 , __lowercase : List[str] = None , __lowercase : str = "fusion" , __lowercase : Union[str, Any] = "repeatpad" , **__lowercase : Optional[int] , ) -> Tuple:
super().__init__(
feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
__UpperCAmelCase : List[Any] = top_db
__UpperCAmelCase : List[Any] = truncation
__UpperCAmelCase : Optional[int] = padding
__UpperCAmelCase : List[str] = fft_window_size
__UpperCAmelCase : Any = (fft_window_size >> 1) + 1
__UpperCAmelCase : Union[str, Any] = hop_length
__UpperCAmelCase : Dict = max_length_s
__UpperCAmelCase : List[Any] = max_length_s * sampling_rate
__UpperCAmelCase : str = sampling_rate
__UpperCAmelCase : int = frequency_min
__UpperCAmelCase : Dict = frequency_max
__UpperCAmelCase : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_snake_case , min_frequency=_snake_case , max_frequency=_snake_case , sampling_rate=_snake_case , norm=_snake_case , mel_scale="""htk""" , )
__UpperCAmelCase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_snake_case , min_frequency=_snake_case , max_frequency=_snake_case , sampling_rate=_snake_case , norm="""slaney""" , mel_scale="""slaney""" , )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__UpperCAmelCase : int = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Dict = None ) -> List[Any]:
__UpperCAmelCase : str = spectrogram(
_snake_case , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_snake_case , log_mel="""dB""" , )
return log_mel_spectrogram.T
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[int] ) -> Optional[int]:
__UpperCAmelCase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : int = [0]
# randomly choose index for each part
__UpperCAmelCase : Optional[int] = np.random.choice(ranges[0] )
__UpperCAmelCase : List[str] = np.random.choice(ranges[1] )
__UpperCAmelCase : List[str] = np.random.choice(ranges[2] )
__UpperCAmelCase : Union[str, Any] = mel[idx_front : idx_front + chunk_frames, :]
__UpperCAmelCase : Union[str, Any] = mel[idx_middle : idx_middle + chunk_frames, :]
__UpperCAmelCase : int = mel[idx_back : idx_back + chunk_frames, :]
__UpperCAmelCase : Any = torch.tensor(mel[None, None, :] )
__UpperCAmelCase : Optional[int] = torch.nn.functional.interpolate(
_snake_case , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=_snake_case )
__UpperCAmelCase : Tuple = mel_shrink[0][0].numpy()
__UpperCAmelCase : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : Dict ) -> str:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__UpperCAmelCase : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__UpperCAmelCase : str = len(_snake_case ) - max_length
__UpperCAmelCase : Tuple = np.random.randint(0 , overflow + 1 )
__UpperCAmelCase : Tuple = waveform[idx : idx + max_length]
__UpperCAmelCase : str = self._np_extract_fbank_features(_snake_case , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__UpperCAmelCase : str = self._np_extract_fbank_features(_snake_case , self.mel_filters )
__UpperCAmelCase : Any = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__UpperCAmelCase : Union[str, Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__UpperCAmelCase : List[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
__UpperCAmelCase : Any = False
else:
__UpperCAmelCase : Dict = self._random_mel_fusion(_snake_case , _snake_case , _snake_case )
__UpperCAmelCase : Optional[Any] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
__UpperCAmelCase : List[str] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__UpperCAmelCase : List[str] = int(max_length / len(_snake_case ) )
__UpperCAmelCase : Tuple = np.stack(np.tile(_snake_case , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__UpperCAmelCase : List[Any] = int(max_length / len(_snake_case ) )
__UpperCAmelCase : int = np.stack(np.tile(_snake_case , _snake_case ) )
__UpperCAmelCase : List[str] = np.pad(_snake_case , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
__UpperCAmelCase : Tuple = self._np_extract_fbank_features(_snake_case , self.mel_filters )
__UpperCAmelCase : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__UpperCAmelCase : List[str] = self._np_extract_fbank_features(_snake_case , self.mel_filters_slaney )[None, :]
return input_mel, longer
    def __call__(self, raw_speech, truncation=None, padding=None, max_length=None, sampling_rate=None, return_tensors=None, **kwargs):
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
# always return batch
if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
# convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 63 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = 'bert'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
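# Hedged usage sketch: the config class above (BertConfig upstream) can be instantiated
# directly, without downloading any checkpoint:
#
#   config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#   print(config.hidden_size, config.max_position_embeddings)  # 256 512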
| 4 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path
    print(f'Load fine-pruned model from {model_name_or_path}')
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            else:
                raise ValueError('Unknown pruning method')
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f'bertarized_{os.path.basename(model_name_or_path)}')
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f'\nCreated folder {target_model_path}')
    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
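# Hedged CLI sketch for the script above (paths are placeholders, not real checkpoints):
#
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path serialization_dir/fine_pruned_model
#
# This densifies the masked weights and writes pytorch_model.bin to a sibling
# bertarized_fine_pruned_model folder.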
| 74 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'callback',
        'latents',
        'callback_steps',
        'output_type',
        'num_images_per_prompt',
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type='fourier', mid_block_type='UNetMidBlock1D', down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'), up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'), )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k', torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
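# Hedged usage sketch of the fp16 path exercised above (assumptions: a CUDA GPU and Hub
# access for the harmonai/maestro-150k weights):
#
#   import torch
#   from diffusers import DanceDiffusionPipeline
#   pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k', torch_dtype=torch.float16)
#   pipe = pipe.to('cuda')
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]  # shape (2, n_samples)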
| 4 | 0 |
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
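
# Added illustration (not part of the original module): a round-trip check against
# the standard library, showing the two functions above invert each other.
def _roundtrip_demo(data: bytes = b"Python") -> None:
    import base64

    encoded = base64_encode(data)
    assert encoded == base64.b64encode(data)
    assert base64_decode(encoded) == data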
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
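
# Added illustration (not part of the original test file): the rope_scaling dict
# exercised above, set on a tiny config outside the test harness. Sizes are
# placeholders chosen only to keep the model small.
def _example_rope_scaled_model():
    config = OpenLlamaConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    config.rope_scaling = {'type': 'dynamic', 'factor': 10.0}
    return OpenLlamaModel(config)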
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a = logging.get_logger(__name__)
a = {'''vocab_file''': '''spiece.model'''}
a = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
a = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
a = 0
a = 1
a = 2
a = 3
a = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
def __init__( self : str ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : str=False ,lowerCamelCase : Any=True ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : Optional[int]="<s>" ,lowerCamelCase : int="</s>" ,lowerCamelCase : List[str]="<unk>" ,lowerCamelCase : int="<sep>" ,lowerCamelCase : List[Any]="<pad>" ,lowerCamelCase : Optional[int]="<cls>" ,lowerCamelCase : List[Any]="<mask>" ,lowerCamelCase : Optional[int]=["<eop>", "<eod>"] ,lowerCamelCase : Dict = None ,**lowerCamelCase : str ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else mask_token
__SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case ,remove_space=_snake_case ,keep_accents=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,additional_special_tokens=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = remove_space
__SCREAMING_SNAKE_CASE = keep_accents
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
@property
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.__dict__.copy()
__SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : Union[str, Any] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.sp_model.PieceToId(_snake_case )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Dict ):
'''simple docstring'''
return self.sp_model.IdToPiece(_snake_case )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """""".join(_snake_case ).replace(_snake_case ,""" """ ).strip()
return out_string
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Tuple ,lowerCamelCase : Dict = False ,lowerCamelCase : Union[str, Any] = None ,lowerCamelCase : Optional[Any] = True ,**lowerCamelCase : int ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = kwargs.pop("""use_source_tokenizer""" ,_snake_case )
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(_snake_case ,skip_special_tokens=_snake_case )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_snake_case ) )
__SCREAMING_SNAKE_CASE = []
sub_texts.append(_snake_case )
else:
current_sub_text.append(_snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__SCREAMING_SNAKE_CASE = """""".join(_snake_case )
__SCREAMING_SNAKE_CASE = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__SCREAMING_SNAKE_CASE = self.clean_up_tokenization(_snake_case )
return clean_text
else:
return text
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Tuple = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1, 1]
return ([0] * len(_snake_case )) + [1, 1]
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Dict ,lowerCamelCase : Union[str, Any] = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
_snake_case ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,"""wb""" ) as fi:
__SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
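
# Hedged usage sketch (added; not part of the tokenizer module): round-tripping text
# through the hub checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP above.
def _example_xlnet_tokenize():
    tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
    ids = tokenizer('Hello, world!')['input_ids']
    return tokenizer.decode(ids)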
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f'Node({self.data})'
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """
    This section of the test used varying data types for input.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.2,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
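
# Added illustration (not part of the original module): reverse() re-points each node
# exactly once, i.e. O(n) time and O(1) extra space. A minimal standalone check:
def _reverse_demo() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert_tail(value)
    linked_list.reverse()
    assert str(linked_list) == "3->2->1"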
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')


if __name__ == "__main__":
    main()
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
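
# Example invocation (added illustration; both file names are placeholders). Each
# line of the "correct" file is expected to look like
# "tests/test_foo.py;FooModelTest;test_bar;expected_slice = np.array([...])".
#
#   python overwrite_expected_slice.py --correct_filename correct_slices.txt \
#       --fail_filename failed_tests.txt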
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
"""simple docstring"""
from __future__ import annotations
import requests
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(_UpperCAmelCase ).json()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ):
lowerCAmelCase = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
lowerCAmelCase = requests.get(_UpperCAmelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ):
lowerCAmelCase = hackernews_top_stories(_UpperCAmelCase )
return "\n".join('* [{title}]({url})'.format(**_UpperCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
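
# Illustrative output only (added; actual stories vary run to run):
#
#   * [Example story title](https://example.com/story)
#   * [Another example story](https://example.com/other)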
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCamelCase :Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
def __init__(self , **lowercase ):
super().__init__(**_snake_case )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__(self , lowercase , **lowercase ):
return super().__call__(_snake_case , **_snake_case )
def _a (self , **lowercase ):
A_ : int = {}
if "candidate_labels" in kwargs:
A_ : str = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _a (self , lowercase , lowercase=None , lowercase="This is a photo of {}." ):
A_ : Any = load_image(_snake_case )
A_ : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework )
A_ : int = candidate_labels
A_ : List[Any] = [hypothesis_template.format(_snake_case ) for x in candidate_labels]
A_ : Optional[int] = self.tokenizer(_snake_case , return_tensors=self.framework , padding=_snake_case )
A_ : Tuple = [text_inputs]
return inputs
def _a (self , lowercase ):
A_ : Optional[Any] = model_inputs.pop("""candidate_labels""" )
A_ : Dict = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , _snake_case ):
A_ : Dict = text_inputs[0]
else:
# Batching case.
A_ : Optional[Any] = text_inputs[0][0]
A_ : str = self.model(**_snake_case , **_snake_case )
A_ : Optional[int] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
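
# Hedged usage sketch (added; not part of the pipeline module): driving the task
# through the high-level API. The CLIP checkpoint name is an assumption for
# illustration.
def _example_zero_shot_image_classification():
    from transformers import pipeline

    classifier = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
    return classifier(
        'http://images.cocodataset.org/val2017/000000039769.jpg',
        candidate_labels=['a photo of a cat', 'a photo of a dog'],
    )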
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
lowerCAmelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 48
lowerCAmelCase = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 60
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase = 1
lowerCAmelCase = 1
lowerCAmelCase = 126
lowerCAmelCase = 7
lowerCAmelCase = 255.0
lowerCAmelCase = ''
return config
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
lowerCAmelCase = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
lowerCAmelCase = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
lowerCAmelCase = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
lowerCAmelCase = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
lowerCAmelCase = 'layernorm.weight'
if name == "norm.bias":
lowerCAmelCase = 'layernorm.bias'
if "conv_first" in name:
lowerCAmelCase = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' )
lowerCAmelCase = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
lowerCAmelCase = 'swin2sr.' + name
return name
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
lowerCAmelCase = key.split('.' )
lowerCAmelCase = int(key_split[1] )
lowerCAmelCase = int(key_split[4] )
lowerCAmelCase = config.embed_dim
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[dim : dim * 2, :]
lowerCAmelCase = val[-dim:, :]
else:
lowerCAmelCase = val[:dim]
lowerCAmelCase = val[dim : dim * 2]
lowerCAmelCase = val[-dim:]
pass
else:
lowerCAmelCase = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ):
lowerCAmelCase = get_config(_UpperCAmelCase )
lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCAmelCase )
model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'Unexpected key {key} in state_dict')
# verify values
lowerCAmelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
lowerCAmelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCAmelCase = 126 if 'Jpeg' in checkpoint_url else 256
lowerCAmelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowerCAmelCase = transforms(_UpperCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCAmelCase = model(_UpperCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 512, 512] )
lowerCAmelCase = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 512, 512] )
lowerCAmelCase = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-3 )
print('Looks ok!' )
lowerCAmelCase = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
lowerCAmelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
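
# Example invocation (added illustration; the script and output-directory names are
# placeholders):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64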
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
a__ , R'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self: Any , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_snake_case)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: List[str]="" , **_SCREAMING_SNAKE_CASE: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = tokenizer_kwargs
__lowerCAmelCase : int = {}
if hasattr(self.model.config , "return_all_scores") and return_all_scores is None:
__lowerCAmelCase : int = self.model.config.return_all_scores
if isinstance(_snake_case , _snake_case) or top_k is None:
__lowerCAmelCase : str = top_k
__lowerCAmelCase : List[str] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , _snake_case , )
if return_all_scores:
__lowerCAmelCase : Dict = None
else:
__lowerCAmelCase : Optional[Any] = 1
if isinstance(_snake_case , _snake_case):
__lowerCAmelCase : List[str] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__lowerCAmelCase : Any = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self: List[Any] , *_SCREAMING_SNAKE_CASE: Tuple , **_SCREAMING_SNAKE_CASE: List[str]) -> str:
"""simple docstring"""
__lowerCAmelCase : Any = super().__call__(*_snake_case , **_snake_case)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__lowerCAmelCase : List[Any] = "top_k" not in kwargs
if isinstance(args[0] , _snake_case) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
    def preprocess(self, inputs, **tokenizer_kwargs):
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
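
# Hedged usage sketch (added; not part of the module): the pipeline above via the
# high-level API. The checkpoint name is an assumption for illustration; top_k=None
# is the non-deprecated replacement for return_all_scores=True.
def _example_text_classification():
    from transformers import pipeline

    classifier = pipeline('text-classification', model='distilbert-base-uncased-finetuned-sst-2-english')
    return classifier('I love this movie!', top_k=None)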
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
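
# Hedged usage sketch (added): instantiating the config with defaults and overriding
# a single field; the values here are illustrative only.
def _example_megatron_bert_config():
    config = MegatronBertConfig(num_hidden_layers=2)
    return config.hidden_size  # 1024 by default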
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a: Optional[int] = logging.get_logger(__name__)
__a: Union[str, Any] = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):

    model_type = "instructblip_vision_model"
def __init__( self , __lowerCAmelCase=1408 , __lowerCAmelCase=6144 , __lowerCAmelCase=39 , __lowerCAmelCase=16 , __lowerCAmelCase=224 , __lowerCAmelCase=14 , __lowerCAmelCase="gelu" , __lowerCAmelCase=1E-6 , __lowerCAmelCase=0.0 , __lowerCAmelCase=1E-10 , __lowerCAmelCase=True , **__lowerCAmelCase , ) -> str:
super().__init__(**_snake_case )
lowercase__ : Optional[int] = hidden_size
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Dict = patch_size
lowercase__ : str = image_size
lowercase__ : Any = initializer_range
lowercase__ : int = attention_dropout
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = qkv_bias
@classmethod
def _lowerCAmelCase( cls , __lowerCAmelCase , **__lowerCAmelCase ) -> str:
cls._set_token_in_kwargs(_snake_case )
lowercase__ , lowercase__ : Optional[Any] = cls.get_config_dict(_snake_case , **_snake_case )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
lowercase__ : int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_snake_case , **_snake_case )
class InstructBlipQFormerConfig(PretrainedConfig):

    model_type = "instructblip_qformer"
def __init__( self , __lowerCAmelCase=30522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase="absolute" , __lowerCAmelCase=2 , __lowerCAmelCase=1408 , **__lowerCAmelCase , ) -> List[str]:
super().__init__(pad_token_id=_snake_case , **_snake_case )
lowercase__ : Any = vocab_size
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[Any] = hidden_act
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Dict = max_position_embeddings
lowercase__ : str = initializer_range
lowercase__ : Tuple = layer_norm_eps
lowercase__ : str = position_embedding_type
lowercase__ : Dict = cross_attention_frequency
lowercase__ : str = encoder_hidden_size
@classmethod
def _lowerCAmelCase( cls , __lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
cls._set_token_in_kwargs(_snake_case )
lowercase__ , lowercase__ : Union[str, Any] = cls.get_config_dict(_snake_case , **_snake_case )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
lowercase__ : Optional[int] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_snake_case , **_snake_case )
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "instructblip"
SCREAMING_SNAKE_CASE = True
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=32 , **__lowerCAmelCase ) -> Optional[int]:
super().__init__(**_snake_case )
if vision_config is None:
lowercase__ : List[Any] = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
lowercase__ : Optional[int] = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
lowercase__ : List[str] = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
lowercase__ : Dict = InstructBlipVisionConfig(**_snake_case )
lowercase__ : Optional[Any] = InstructBlipQFormerConfig(**_snake_case )
lowercase__ : Optional[int] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
lowercase__ : Optional[Any] = CONFIG_MAPPING[text_model_type](**_snake_case )
lowercase__ : Any = self.text_config.tie_word_embeddings
lowercase__ : List[str] = self.text_config.is_encoder_decoder
lowercase__ : Any = num_query_tokens
lowercase__ : str = self.vision_config.hidden_size
lowercase__ : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowercase__ : Any = 1.0
lowercase__ : Dict = 0.0_2
@classmethod
def _lowerCAmelCase( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase , ) -> int:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_snake_case , )
def _lowerCAmelCase( self ) -> int:
lowercase__ : List[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.vision_config.to_dict()
lowercase__ : Optional[int] = self.qformer_config.to_dict()
lowercase__ : Union[str, Any] = self.text_config.to_dict()
lowercase__ : List[Any] = self.__class__.model_type
return output
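
# A minimal usage sketch of the composite config above (a sketch, assuming the
# restored classes behave like the released transformers InstructBlip configs;
# no weights are downloaded):
#
#     vision = InstructBlipVisionConfig(num_hidden_layers=2)
#     qformer = InstructBlipQFormerConfig(num_hidden_layers=2)
#     config = InstructBlipConfig(
#         vision_config=vision.to_dict(), qformer_config=qformer.to_dict(), num_query_tokens=32
#     )
#     assert config.text_config.model_type == "opt"  # default when text_config is None
#     assert config.qformer_config.encoder_hidden_size == vision.hidden_size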
| 152 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
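
# Quick sketch of how the placeholder map above can be applied to a docstring
# template (the real docs pipeline does something equivalent; `template` below
# is an illustrative string, not part of the original file):
#
#     template = "from {processor_class} import {model_class}"
#     for placeholder, fake in black_avoid_patterns.items():
#         template = template.replace(placeholder, fake)
#     # -> "from FakeProcessorClass import FakeModelClass"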
| 499 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=3, sample_size=32, num_vq_embeddings=2_56, norm_num_groups=32, vq_embed_dim=None, scaling_factor=0.18_215, norm_type="group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )

    @apply_forward_hook
    def encode(self, x, return_dict=True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True):
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample, return_dict=True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
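
# Round-trip smoke test for the model above (a sketch: run via
# `python -m diffusers.models.vq_model` so the relative imports resolve; the
# default single-block config keeps spatial dims, so input and output match):
if __name__ == "__main__":
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    latents = model.encode(sample).latents
    reconstruction = model.decode(latents).sample
    assert reconstruction.shape == sample.shape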
| 4 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 1_00
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 2_10)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, """Weight can not be negative.""")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, """Profit can not be negative.""")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, """The length of profit and weight must be same.""")
if __name__ == "__main__":
unittest.main()
| 651 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ):
lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = {}
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
with open(_UpperCAmelCase ) as f:
lowerCAmelCase = f.read().strip().split('\n' )
lowerCAmelCase = [line.split(',' ) for line in data]
for edgea in range(1 , len(_UpperCAmelCase ) ):
for edgea in range(_UpperCAmelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] )
lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase )
lowerCAmelCase = graph.prims_algorithm()
lowerCAmelCase = sum(graph.edges.values() )
lowerCAmelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
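
# Toy check of Graph.prims_algorithm above: edges are keyed (u, v) with u < v,
# and the MST of this little graph keeps (0, 2), (1, 2) and (1, 3).
#
#     g = Graph({0, 1, 2, 3}, {(0, 1): 4, (0, 2): 1, (1, 2): 2, (1, 3): 5})
#     mst = g.prims_algorithm()
#     sum(mst.edges.values())  # -> 8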
| 4 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(F'''{round(-1 * my_fir_sum):.1f}''')
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(F'''{round(-1 * my_sec_sum):.1f}''')
    # print the difference between them
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}''')


def analyze_text(text: str) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
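
# Sanity check of analyze_text above: for "abab" the unigram counter sees two
# of each letter, and the bigram counter sees "ab" twice plus the synthetic
# leading " a" pair.
#
#     single, pairs = analyze_text("abab")
#     single["a"] == 2 and single["b"] == 2   # True
#     pairs["ab"] == 2 and pairs[" a"] == 1   # True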
| 412 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list):
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list):
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list):
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list):
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float):
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
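
# Small illustration of the voting check above: two of the three forecasts sit
# within 0.1 of the actual value, so the day counts as safe.
#
#     data_safety_checker([0.5, 0.45, 2.0], 0.5)  # -> True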
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['''total_user''', '''total_even''', '''days''']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
    print(f'''Today\'s data is {not_str}safe.''')
| 4 | 0 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
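
# Worked example of the function above (traced by hand against the greedy
# line-filling plus round-robin space distribution):
#
#     text_justification("This is an example of text justification.", 16)
#     # -> ['This    is    an', 'example  of text', 'justification.  ']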
| 63 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.', )
    parser.add_argument(
        '-c', '--caption', type=str, default='robotic cat with wings', help='Text used to generate images.', )
    parser.add_argument(
        '-n', '--images_num', type=int, default=4, help='How much images to generate.', )
    parser.add_argument(
        '-s', '--seed', type=int, default=42, help='Seed for random process.', )
    parser.add_argument(
        '-ci', '--cuda_id', type=int, default=0, help='cuda_id.', )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, '''unet''', unet)
else:
    unet = unet.to(torch.device('''cuda''', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
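
# Typical invocation of this script (a sketch; the file name and model
# directory are illustrative, not mandated by the code above):
#
#     python text_to_images.py -m ./stable-diffusion-dir -c "robotic cat with wings" -n 4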
| 4 | 0 |
import os
from collections.abc import Iterator
def a__ ( snake_case = "." ):
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('''./''' )
def a__ ( snake_case ):
"""simple docstring"""
return F'''{i * " "}*''' if i else "\n##"
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def a__ ( snake_case = "." ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = os.path.split(_UpperCAmelCase )
if filepath != old_path:
__SCREAMING_SNAKE_CASE : Union[str, Any] = print_path(_UpperCAmelCase , _UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''{filepath}/{filename}'''.replace(''' ''' , '''%20''' )
__SCREAMING_SNAKE_CASE : int = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(F'''{md_prefix(_UpperCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(""".""")
| 74 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]):
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), F'{len(dest_layers)} != {len(layers_to_copy)}'
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
                F' {n_student}')
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher):
    if n_student > n_teacher:
        raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}')
    elif n_teacher == n_student:
        return list(range(n_student))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(teacher: Union[str, PreTrainedModel], save_path: Union[str, Path] = "student", e: Union[int, None] = None, d: Union[int, None] = None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs):
    _msg = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), F'teacher must be a model or string got type {type(teacher)}'
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d})
    except AttributeError:  # T5
        if hasattr(teacher.config, 'num_encoder_layers'):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, 'num_encoder_layers'):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d})
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
            F' {save_path}')
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, 'prophetnet'
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}')
    student.config.init_metadata = {
        'teacher_type': teacher.config.model_type,
        'copied_encoder_layers': e_layers_to_copy,
        'copied_decoder_layers': d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
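
# Example invocation (a sketch; the teacher checkpoint and file name are
# illustrative, not fixed by the code above):
#
#     student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#         "sshleifer/bart-tiny-random", save_path="tiny-student", e=1, d=1
#     )
#     # or via the fire CLI exposed above:
#     #   python make_student.py sshleifer/bart-tiny-random tiny-student 1 1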
| 4 | 0 |
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
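
# Once uploaded, the stub can be pulled back like any hub checkpoint (a
# sketch; the namespace prefix depends on the uploading account):
#
#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#     model = FSMTForConditionalGeneration.from_pretrained("<user>/tiny-wmt19-en-ru")
#     tokenizer = FSMTTokenizer.from_pretrained("<user>/tiny-wmt19-en-ru")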
| 239 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["""$"""] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = """*"""
                    check1[j] = """*"""
                    temp.append("""X""")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("""_""")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("""Enter the no. of variables\n"""))
    minterms = [
        float(x)
        for x in input(
            """Enter the decimal representation of Minterms \'Spaces Separated\'\n""").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("""Prime Implicants are:""")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("""Essential Prime Implicants are:""")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
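
# Quick illustration of the merge step in compare_string above: two implicants
# differing in a single bit combine into one with a "don't care" position,
# while more than one differing bit fails the merge.
#
#     compare_string("110", "100")  # -> "1_0"
#     compare_string("110", "001")  # -> False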
| 109 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
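
# Quick numeric check of the two formulas above: for two 10-ohm resistors,
# series resistance is R1 + R2 and parallel resistance is 1 / (1/R1 + 1/R2).
assert resistor_series([10, 10]) == 20.0
assert resistor_parallel([10, 10]) == 5.0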
| 4 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, apply_ocr=True, ocr_lang=None, tesseract_config="", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 2_24, "width": 2_24}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
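
# A minimal usage sketch of the processor above (an assumption-flagged demo:
# passing apply_ocr=False avoids the Tesseract dependency entirely):
#
#     from PIL import Image
#     processor = LayoutLMv2ImageProcessor(apply_ocr=False)
#     image = Image.new("RGB", (640, 480), color="white")
#     encoding = processor(image, return_tensors="np")
#     encoding.pixel_values.shape  # (1, 3, 224, 224)
 | 623 |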
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = '''glpn'''

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 1_60, 2_56], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1E-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
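
# e.g. instantiating the config above and reading back a couple of the
# restored defaults (no weights involved):
#
#     config = GLPNConfig(depths=[1, 1, 1, 1])
#     config.hidden_sizes         # [32, 64, 160, 256]
#     config.decoder_hidden_size  # 64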
| 4 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a__ , a__ , a__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : int = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[str] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : Tuple = False
def _a (self ):
A_ : str = OpenLlamaModelTester(self )
A_ : int = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a (self ):
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Any = type
self.model_tester.create_and_check_model(*_snake_case )
def _a (self ):
A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Union[str, Any] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Dict = input_ids.ne(1 ).to(_snake_case )
A_ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A_ : List[str] = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
A_ : Any = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a (self ):
A_, A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Optional[int] = """single_label_classification"""
A_ : Optional[int] = input_dict["""input_ids"""]
A_ : Dict = input_ids.ne(1 ).to(_snake_case )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A_ : Union[str, Any] = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
A_ : Tuple = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a (self ):
A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : List[Any] = """multi_label_classification"""
A_ : Dict = input_dict["""input_ids"""]
A_ : Dict = input_ids.ne(1 ).to(_snake_case )
A_ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[Any] = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
A_ : Optional[Any] = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _a (self ):
pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
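

# A minimal standalone sketch of the two RoPE scaling schemes exercised above.
# The formulas below are assumptions matching the "linear" and dynamic-NTK
# variants commonly used with Llama-style models, not code from this test file.
def _linear_scaled_positions(seq_len, factor):
    import torch

    # Linear scaling stretches the position grid: positions are divided by `factor`.
    return torch.arange(seq_len, dtype=torch.float32) / factor


def _dynamic_ntk_base(base, dim, seq_len, max_position_embeddings, factor):
    # Dynamic NTK leaves RoPE unchanged for inputs within the trained length and only
    # rescales the rotary base for longer inputs - which is why the short input still
    # matches the original model in the "dynamic" branch of the test above.
    if seq_len <= max_position_embeddings:
        return base
    return base * ((factor * seq_len / max_position_embeddings) - (factor - 1)) ** (dim / (dim - 2))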
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
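

# The nested swap loop in TFLayoutLMModelTester.prepare_config_and_inputs ensures
# x0 <= x1 and y0 <= y1 for every box. A vectorized numpy equivalent (sketch;
# `bbox` is assumed to be an integer array of shape [batch, seq_len, 4]):
def _make_boxes_legal(bbox):
    xs = np.sort(bbox[..., [0, 2]], axis=-1)  # sorted x-coordinates: x0 <= x1
    ys = np.sort(bbox[..., [1, 3]], axis=-1)  # sorted y-coordinates: y0 <= y1
    return np.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], axis=-1)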
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
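

# LayoutLM expects word bounding boxes on a 0-1000 grid, as in the integration
# inputs above. A sketch of the usual normalization step (the page size is
# assumed to come from the upstream OCR engine):
def _normalize_box(box, page_width, page_height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / page_width),
        int(1000 * y0 / page_height),
        int(1000 * x1 / page_width),
        int(1000 * y1 / page_height),
    ]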
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
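

# Sketch of the vocab.txt format parsed above: each line holds one or more
# comma-separated surface forms that all share a single token id (the line
# contents here are hypothetical):
def _demo_vocab_line_parsing():
    lines = ["こんにちは,コンニチハ\n", "です\n"]
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in lines]
    assert token == [["こんにちは", "コンニチハ"], ["です"]]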
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>",
        bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token,
            do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
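

# Standalone sketch of the byte fallback used by SubWordJapaneseTokenizer above:
# out-of-vocabulary characters are emitted as "<|byte%d|>" tokens and decoded
# back through a bytearray (the sample character is arbitrary).
def _demo_byte_fallback():
    tokens = ["<|byte%d|>" % b for b in "猫".encode("utf-8")]
    assert bytearray(int(t[6:-2]) for t in tokens).decode("utf-8", errors="replace") == "猫"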
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
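

# In-memory sketch of how one REPLACE_PATTERNS entry rewrites a version string
# (the sample __version__ line is hypothetical):
def _demo_version_replace():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    code = '__version__ = "4.30.0.dev0"'
    assert re_pattern.sub(replace.replace("VERSION", "4.30.0"), code) == '__version__ = "4.30.0"\n'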
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
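

# Single-key sketch of the prefix renaming performed by get_new_dict (the key
# below is a typical example, not taken from a real checkpoint):
def _demo_rename_key():
    key = "bert.bert.encoder.layer.0.attention.self.query.weight"
    for old, new in rename_keys_prefix:
        key = key.replace(old, new)
    assert key == "visual_bert.encoder.layer.0.attention.self.query.weight"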
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
lowerCAmelCase = dset.map(
lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case )
lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_snake_case )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest(TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = faiss.IndexFlat(5 )
lowerCAmelCase = FaissIndex(custom_index=_snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
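

# Why the eye-matrix queries above land on index 1: with METRIC_INNER_PRODUCT the
# nearest neighbour maximizes the dot product. A faiss-free numpy check (sketch):
def _demo_inner_product_search():
    vectors = np.eye(5, dtype=np.float32)  # five one-hot database vectors
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores = vectors @ query  # inner products against every database vector
    assert int(np.argmax(scores)) == 1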
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = Elasticsearch()
lowerCAmelCase = {'acknowledged': True}
lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
# batched queries with timeout
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A : Any = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0,
        do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform):
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        # utterance-level cepstral mean and variance normalization over the valid frames
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
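

# Standalone sketch of the utterance-level CMVN applied above: statistics are
# computed over the valid (unpadded) frames only, then applied to the whole
# feature matrix (shapes are illustrative):
def _demo_cmvn(x, input_length):
    mean = x[:input_length].mean(axis=0)
    std = x[:input_length].std(axis=0)
    return ((x - mean) / std).astype(np.float32)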
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
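

# Sketch of the device-dependent seeding used in get_dummy_inputs above: MPS
# needs a CPU-seeded default generator, other backends take a device-local one.
def _make_seeded_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)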
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
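
# Sketch of how these tuned values would be consumed at generation time, merged
# with the fixed search defaults described in the comment above (hypothetical
# helper, not part of the conversion script):
def _demo_generation_kwargs(model_name):
    kwargs = {"num_beams": 5, "early_stopping": False}
    kwargs.update(best_score_hparams.get(model_name, {}))
    return kwargs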
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
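

# Round-trip sketch of rewrite_dict_keys on a toy vocabulary (entries chosen to
# include the special tokens the function restores):
def _demo_rewrite_dict_keys():
    d = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
    out = rewrite_dict_keys(d)
    assert out["le"] == 5 and out["tt"] == 6 and out["er</w>"] == 7 and out["<s>"] == 0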
def snake_case_ (__A : str , __A : Any ) -> str:
# prep
assert os.path.exists(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
__lowerCAmelCase : Union[str, Any] = basename(_UpperCAmelCase )
__lowerCAmelCase : Tuple = dirname(_UpperCAmelCase )
__lowerCAmelCase : Union[str, Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
__lowerCAmelCase : Optional[Any] = cls.hub_models()
__lowerCAmelCase : Any = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
__lowerCAmelCase : Tuple = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
__lowerCAmelCase : Optional[Any] = hub_utils.from_pretrained(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , archive_map=_UpperCAmelCase , **_UpperCAmelCase )
__lowerCAmelCase : Optional[Any] = vars(chkpt["""args"""]["""model"""] )
__lowerCAmelCase : Optional[Any] = args["""source_lang"""]
__lowerCAmelCase : List[Any] = args["""target_lang"""]
__lowerCAmelCase : str = dirname(_UpperCAmelCase )
__lowerCAmelCase : int = basename(_UpperCAmelCase )
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
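    # Equivalent one-liner (an illustrative sketch, not part of the original script):
    # do_lower_case = all(k.islower() for k in src_vocab)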
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=2))
# merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
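    # e.g. (illustrative) a merges line such as "e n 1423" becomes just "e n" after the substitution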
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 651 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True,
                 use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
                 num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 4 | 0 |
class SubArray:
    def __init__(self, arr):
        # split the comma-separated input string into a list of number strings
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
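    # Quick check (illustrative): for the input "1,-2,3,4,-1" the Kadane-style scan
    # above yields 7, the best contiguous run being 3 + 4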
| 412 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
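    # (illustrative note) with the default bert tokenizer, bos/sep resolve to "[CLS]"/"[SEP]",
    # so each raw line is encoded below as "[CLS] <text> [SEP]"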
    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
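    # (illustrative note) 1 << 16 == 65536: when every token id fits in 16 bits, storing
    # uint16 arrays roughly halves the pickle size compared to int32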
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| 4 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
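# Illustrative usage (a sketch; assumes the dataset declares this template in its
# `task_templates`, in which case `prepare_for_task` applies `column_mapping` above):
#   ds = load_dataset("cnn_dailymail", "3.0.0", split="train")
#   ds = ds.prepare_for_task("summarization")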
| 63 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 4 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
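# A quick sanity check (illustrative, not part of the original module): uniform logits give
# the maximum entropy log(n), e.g. entropy(torch.zeros(1, 4)) -> tensor([1.3863]) == log(4)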
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
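                    # low entropy means a confident early prediction, so we short-circuit the
                    # remaining layers; the classifier catches HighwayException and uses this exit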
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}"""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
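        # (illustrative) a masked position carries mask value 0, so it receives
        # (1.0 - 0) * -10000 = -10000 on its attention scores, which softmax maps to ~0 probability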
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification)"""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 74 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.floataa)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 4 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
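# Illustrative usage (the checkpoint name is an assumption, not from this module):
#   fe_config = get_feature_extractor_config("facebook/wav2vec2-base-960h")
#   fe_config.get("feature_extractor_type")  # e.g. "Wav2Vec2FeatureExtractor"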
class AutoFeatureExtractor:
    def __init__(self):
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 239 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
snake_case__ = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = OpenLlamaModel(_snake_case )
original_model.to(_snake_case )
original_model.eval()
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase = OpenLlamaModel(_snake_case )
scaled_model.to(_snake_case )
scaled_model.eval()
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
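        # Rationale for the branches above: linear RoPE scaling rescales every position,
        # so even short inputs diverge from the unscaled model; dynamic NTK scaling only
        # activates beyond the original max_position_embeddings, so short inputs match
        # while long inputs must differ under both scaling types.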
| 4 | 0 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    '''Return 1 if at least one input is 1, else 0 (logical OR).'''
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
    '''Check every row of the OR truth table.'''
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
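# A compact property check equivalent to the asserts above (sketch using only the stdlib):
#   from itertools import product
#   assert all(or_gate(a, b) == (a | b) for a, b in product((0, 1), repeat=2))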
| 109 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__( self , data ):
        self.data = data
        self.next = None
    def __repr__( self ):
        return F'Node({self.data})'
class LinkedList:
    def __init__( self ):
        self.head = None
    def __iter__( self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ):
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ):  # print every node data
        print(self )
    def delete_head( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        return self.head is None
    def reverse( self ):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
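# Note on reverse(): it runs in O(n) time and O(1) extra space -- a single pass
# re-points each node's `next` at its predecessor, then the old tail becomes the head.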
def test_singly_linked_list():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2():
    test_input = [
        -9,
        100,
        Node(77345112 ),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
from doctest import testmod
testmod()
    linked_list = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_UpperCAmelCase )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
    linked_list[1] = input('Enter New Value: ' ).strip()
print('New list:' )
print(_UpperCAmelCase )
print(F'length of linked_list is : {len(_UpperCAmelCase )}' )
if __name__ == "__main__":
main()
| 4 | 0 |
import qiskit
def half_adder( bit0 , bit1 ):
    """Simulate a quantum half adder for two classical input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
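    # For inputs (1, 1) the half adder gives sum (XOR) = 0 and carry (AND) = 1, so every
    # shot should collapse to the bitstring "10" (qiskit orders classical bits as c1 c0).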
# Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""") | 623 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str ) -> dict:
    url = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url ).json()
def hackernews_top_stories(max_stories: int = 10 ) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10 ) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join('* [{title}]({url})'.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
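# This module follows the transformers lazy-import pattern: an import map is declared
# up front, and the _LazyModule registered at the bottom defers the heavy torch/vision
# imports until an attribute is actually accessed.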
_import_structure = {
    '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_vivit'''] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vivit'''] = [
        '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''VivitModel''',
        '''VivitPreTrainedModel''',
        '''VivitForVideoClassification''',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url: str ):
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name , config ):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers' , 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks' , 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias' , 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias' , 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias' , 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first' , 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last' , 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0' , 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2' , 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
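# Note on the qkv handling above: the original checkpoint stores a fused qkv projection;
# it is split into three equal chunks along dim 0 to populate the separate
# query/key/value parameters expected by the HF attention module.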
def convert_swin2sr_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    state_dict = convert_state_dict(state_dict , config )
    missing_keys ,unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F'Unexpected key {key} in state_dict' )
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
    print('Looks ok!' )
    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(F'caidas/{model_name}' )
        processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
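# Example invocation (script filename and output path are illustrative, not from the source):
#   python convert_swin2sr_original_to_pytorch.py \
#     --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#     --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64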
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class A__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE = BlenderbotSmallConfig
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = 'gelu'
def __init__( self: int , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]=13 , _SCREAMING_SNAKE_CASE: int=7 , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , _SCREAMING_SNAKE_CASE: str=99 , _SCREAMING_SNAKE_CASE: Optional[int]=32 , _SCREAMING_SNAKE_CASE: Optional[int]=2 , _SCREAMING_SNAKE_CASE: Any=4 , _SCREAMING_SNAKE_CASE: Tuple=37 , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: List[Any]=20 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: int=1 , _SCREAMING_SNAKE_CASE: Dict=0 , ) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = parent
__lowerCAmelCase : str = batch_size
__lowerCAmelCase : Optional[int] = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : List[Any] = use_labels
__lowerCAmelCase : Any = vocab_size
__lowerCAmelCase : Union[str, Any] = hidden_size
__lowerCAmelCase : List[str] = num_hidden_layers
__lowerCAmelCase : List[str] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[Any] = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : List[str] = eos_token_id
__lowerCAmelCase : List[Any] = pad_token_id
__lowerCAmelCase : Optional[Any] = bos_token_id
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
__lowerCAmelCase : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
__lowerCAmelCase : str = tf.concat([input_ids, eos_tensor] , axis=1)
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowerCAmelCase : Optional[int] = prepare_blenderbot_small_inputs_dict(_snake_case , _snake_case , _snake_case)
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : str = TFBlenderbotSmallModel(config=_snake_case).get_decoder()
__lowerCAmelCase : List[Any] = inputs_dict["input_ids"]
__lowerCAmelCase : List[str] = input_ids[:1, :]
__lowerCAmelCase : List[Any] = inputs_dict["attention_mask"][:1, :]
__lowerCAmelCase : Optional[Any] = inputs_dict["head_mask"]
__lowerCAmelCase : str = 1
# first forward pass
__lowerCAmelCase : List[Any] = model(_snake_case , attention_mask=_snake_case , head_mask=_snake_case , use_cache=_snake_case)
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size)
        __lowerCAmelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
# append to next input_ids and
__lowerCAmelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1)
__lowerCAmelCase : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1)
__lowerCAmelCase : Tuple = model(_snake_case , attention_mask=_snake_case)[0]
__lowerCAmelCase : Optional[Any] = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
__lowerCAmelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1]))
__lowerCAmelCase : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
__lowerCAmelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1e-3)
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
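# Note: any input not passed to the helper above is synthesized -- attention masks are
# derived from the pad token, and head masks default to all-ones tensors shaped
# (num_layers, num_heads) for the encoder, decoder, and cross-attention stacks.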
@require_tf
class A__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[Any] = TFBlenderbotSmallModelTester(self)
__lowerCAmelCase : Dict = ConfigTester(self , config_class=_snake_case)
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case)
@require_tokenizers
@require_tf
class A__ ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
SCREAMING_SNAKE_CASE = 'facebook/blenderbot_small-90M'
@cached_property
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
@cached_property
def _SCREAMING_SNAKE_CASE ( self: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , return_tensors="tf")
__lowerCAmelCase : List[Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_snake_case , )
__lowerCAmelCase : Optional[int] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_snake_case)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
) | 293 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = '''megatron-bert'''
    def __init__( self , vocab_size=29056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
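# Usage sketch (illustrative values, assuming the `transformers` package is installed):
#   config = MegatronBertConfig(vocab_size=29056, hidden_size=1024)
#   assert config.model_type == "megatron-bert"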
| 4 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class UpperCAmelCase ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = StableDiffusionControlNetImgaImgPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCAmelCase( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowercase__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowercase__ : int = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowercase__ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__ : Union[str, Any] = CLIPTextModel(_snake_case )
lowercase__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : Tuple = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> List[str]:
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : str = torch.manual_seed(_snake_case )
else:
lowercase__ : str = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : Tuple = 2
lowercase__ : Dict = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , )
lowercase__ : Any = floats_tensor(control_image.shape , rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase__ : Optional[Any] = Image.fromarray(np.uint8(_snake_case ) ).convert('''RGB''' ).resize((64, 64) )
lowercase__ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def _lowerCAmelCase( self ) -> Any:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowerCAmelCase( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _lowerCAmelCase( self ) -> Optional[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class UpperCAmelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = StableDiffusionControlNetImgaImgPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _lowerCAmelCase( self ) -> List[Any]:
torch.manual_seed(0 )
lowercase__ : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
lowercase__ : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_snake_case )
torch.manual_seed(0 )
lowercase__ : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_snake_case )
torch.manual_seed(0 )
lowercase__ : List[str] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
lowercase__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__ : Optional[Any] = CLIPTextModel(_snake_case )
lowercase__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : str = MultiControlNetModel([controlneta, controlneta] )
lowercase__ : int = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> List[Any]:
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : List[Any] = torch.manual_seed(_snake_case )
else:
lowercase__ : int = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : Tuple = 2
lowercase__ : List[Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ),
]
lowercase__ : Optional[int] = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase__ : Union[str, Any] = Image.fromarray(np.uint8(_snake_case ) ).convert('''RGB''' ).resize((64, 64) )
lowercase__ : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def _lowerCAmelCase( self ) -> str:
lowercase__ : Tuple = self.get_dummy_components()
lowercase__ : Dict = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
lowercase__ : List[str] = 1_0.0
lowercase__ : Union[str, Any] = 4
lowercase__ : List[str] = self.get_dummy_inputs(_snake_case )
lowercase__ : Dict = steps
lowercase__ : Any = scale
lowercase__ : Optional[int] = pipe(**_snake_case )[0]
lowercase__ : int = self.get_dummy_inputs(_snake_case )
lowercase__ : Any = steps
lowercase__ : int = scale
lowercase__ : Any = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowercase__ : int = self.get_dummy_inputs(_snake_case )
lowercase__ : List[str] = steps
lowercase__ : int = scale
lowercase__ : Union[str, Any] = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowercase__ : Any = self.get_dummy_inputs(_snake_case )
lowercase__ : Dict = steps
lowercase__ : List[str] = scale
lowercase__ : List[str] = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
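        # Each run above varies only the control-guidance window (the span of denoising
        # steps during which the ControlNet residuals are applied), so pairwise-different
        # outputs confirm `control_guidance_start`/`control_guidance_end` take effect.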
def _lowerCAmelCase( self ) -> Optional[Any]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowerCAmelCase( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _lowerCAmelCase( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : Any = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_snake_case )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase( self ) -> str:
lowercase__ : Any = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
lowercase__ : List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=_snake_case , controlnet=_snake_case )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ : Any = '''evil space-punk bird'''
lowercase__ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
lowercase__ : Union[str, Any] = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
lowercase__ : int = pipe(
_snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
lowercase__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
lowercase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
| 152 |
"""simple docstring"""
def reverse_words(input_str: str ) -> str:
    """Reverse the word order of a sentence: 'I love Python' -> 'Python love I'."""
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |