| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
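The `_LazyModule` registration above means the submodules are imported only when one of their symbols is first accessed. A minimal sketch of the resulting behavior, assuming the standard `transformers` package layout:

```python
# Lazy-import sketch: nothing under models/jukebox is loaded at package import
# time; touching a symbol triggers the import of just that submodule.
from transformers import JukeboxConfig  # loads configuration_jukebox on first access

config = JukeboxConfig()
print(config.model_type)
```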
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
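A small driver for the helpers above might look like this; the menu data is invented for illustration:

```python
# Hypothetical menu: pick items greedily by value/weight ratio under a cost cap.
names = ["burger", "pizza", "cola", "rice"]
values = [80.0, 100.0, 60.0, 70.0]
weights = [40.0, 60.0, 40.0, 70.0]

menu = build_menu(names, values, weights)
chosen, total_value = greedy(menu, max_cost=100.0, key_func=Things.value_weight)
print(chosen, total_value)
```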
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if n == 0:
return 0
UpperCamelCase : List[str] = float("""-inf""" )
for i in range(1 , n + 1 ):
UpperCamelCase : str = max(
SCREAMING_SNAKE_CASE , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE ) )
return max_revue
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase : Any = float("""-inf""" )
for i in range(1 , n + 1 ):
UpperCamelCase : str = max(
SCREAMING_SNAKE_CASE , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
UpperCamelCase : Any = max_revenue
return max_rev[n]
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
UpperCamelCase : str = [float("""-inf""" ) for _ in range(n + 1 )]
UpperCamelCase : List[Any] = 0
for i in range(1 , n + 1 ):
UpperCamelCase : str = max_rev[i]
for j in range(1 , i + 1 ):
UpperCamelCase : Union[str, Any] = max(SCREAMING_SNAKE_CASE , prices[j - 1] + max_rev[i - j] )
UpperCamelCase : Any = max_revenue_i
return max_rev[n]
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if n < 0:
UpperCamelCase : List[Any] = f"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(SCREAMING_SNAKE_CASE )
if n > len(SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = (
"""Each integral piece of rod must have a corresponding price. """
f"""Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE )}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
def UpperCamelCase ():
UpperCamelCase : Tuple = [6, 10, 12, 15, 20, 23]
UpperCamelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase : Optional[int] = 36
UpperCamelCase : Optional[int] = top_down_cut_rod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase : str = bottom_up_cut_rod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase : int = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
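All three functions compute the same optimum, r(n) = max over 1 <= i <= n of prices[i - 1] + r(n - i). A quick cross-check on the classic CLRS price table:

```python
# Cross-check the three implementations on the CLRS rod-cutting price table,
# where prices[i - 1] is the price of a piece of length i. The optimum for
# n = 8 is 22 (cut into pieces of length 2 and 6: 5 + 17).
prices = [1, 5, 8, 9, 10, 17, 17, 20]
n = len(prices)
assert naive_cut_rod_recursive(n, prices) == top_down_cut_rod(n, prices) == bottom_up_cut_rod(n, prices) == 22
```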
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written
        return written
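These classes back the public `Dataset.from_sql` / `Dataset.to_sql` helpers in the `datasets` library; a minimal round trip might look like this (database path and table name are made up for the sketch):

```python
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["foo", "bar"]})
con = sqlite3.connect("example.db")                       # illustrative database
ds.to_sql("my_table", con)                                # routed through SqlDatasetWriter
back = Dataset.from_sql("SELECT * FROM my_table", con)    # routed through SqlDatasetReader
print(back[0])
```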
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Optional[int] = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Dict=0 ):
"""simple docstring"""
_snake_case = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__lowerCamelCase ) )
_snake_case = np.random.RandomState(__lowerCamelCase )
_snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**__lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**__lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# warmup pass to apply optimizations
_snake_case = pipe(**self.get_dummy_inputs() )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**__lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**__lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**__lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**__lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
@property
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = ort.SessionOptions()
_snake_case = False
return options
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_snake_case = init_image.resize((7_6_8, 5_1_2) )
# using the PNDM scheduler by default
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = '''A fantasy landscape, trending on artstation'''
_snake_case = np.random.RandomState(0 )
_snake_case = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__lowerCamelCase , output_type='''np''' , )
_snake_case = output.images
_snake_case = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
_snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_snake_case = init_image.resize((7_6_8, 5_1_2) )
_snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = '''A fantasy landscape, trending on artstation'''
_snake_case = np.random.RandomState(0 )
_snake_case = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__lowerCamelCase , output_type='''np''' , )
_snake_case = output.images
_snake_case = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
_snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculation of the Easter date for a given year, using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation whose latents are anchored to a 64x64 reference grid."""

    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Union[str, Any]:
self.enable_attention_slicing(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, text_embeddings=None, **kwargs):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(SCREAMING_SNAKE_CASE__ )}.""" )
# get prompt text embeddings
A__ = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
A__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A__ , A__ , A__ = text_embeddings.shape
A__ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE__ , 1 )
A__ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A__ = 42
if negative_prompt is None:
A__ = [""]
elif type(SCREAMING_SNAKE_CASE__ ) is not type(SCREAMING_SNAKE_CASE__ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE__ )} !="""
f""" {type(SCREAMING_SNAKE_CASE__ )}.""" )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE__ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
A__ = negative_prompt
A__ = text_input_ids.shape[-1]
A__ = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors="pt" , )
A__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A__ = uncond_embeddings.shape[1]
A__ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
A__ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A__ = torch.randn(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device="cpu" , dtype=SCREAMING_SNAKE_CASE__ ).to(self.device )
A__ = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device="cpu" , dtype=SCREAMING_SNAKE_CASE__ ).to(
self.device )
else:
A__ = torch.randn(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ )
A__ = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
A__ = latents_reference.to(self.device )
A__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A__ = (latents_shape[3] - latents_shape_reference[3]) // 2
A__ = (latents_shape[2] - latents_shape_reference[2]) // 2
A__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A__ = 0 if dx < 0 else dx
A__ = 0 if dy < 0 else dy
A__ = max(-dx , 0 )
A__ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ = {}
if accepts_eta:
A__ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
A__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# predict the noise residual
A__ = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ ).sample
# perform guidance
if do_classifier_free_guidance:
A__ , A__ = noise_pred.chunk(2 )
A__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A__ = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = 1 / 0.1_8_2_1_5 * latents
A__ = self.vae.decode(SCREAMING_SNAKE_CASE__ ).sample
A__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A__ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) , return_tensors="pt" ).to(
self.device )
A__ , A__ = self.safety_checker(
images=SCREAMING_SNAKE_CASE__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A__ = None
if output_type == "pil":
A__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE__ , nsfw_content_detected=SCREAMING_SNAKE_CASE__ )
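Structurally this is a diffusers community pipeline (the in-code comments describe keeping same-seed images similar across sizes), so it would typically be loaded via the `custom_pipeline` argument; a hedged sketch, with the registration name assumed:

```python
import torch
from diffusers import DiffusionPipeline

# Assumed community-pipeline registration name for the file above.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="seed_resize_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe("a lone lighthouse at dusk", height=512, width=768, generator=generator).images[0]
image.save("lighthouse.png")
```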
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a small trainable low-rank adapter (for the training tests below)."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Union[str, Any] ):
super().setUp()
# Models and tokenizer
a__ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : List[Any] ):
a__ : str = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) )
a__ : Optional[Any] = config.to_dict()
a__ : int = config.to_diff_dict()
a__ : List[str] = config.to_json_string()
def _UpperCamelCase( self : int ):
from bitsandbytes.nn import Paramsabit
a__ : List[Any] = self.model_fpaa.get_memory_footprint()
a__ : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a__ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCamelCase( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCamelCase( self : str ):
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[Any] = BitsAndBytesConfig()
a__ : Optional[int] = True
a__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" )
a__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : Dict ):
with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : int = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase__ ):
a__ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _UpperCamelCase( self : int ):
with self.assertRaises(lowerCamelCase__ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a__ : int = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Any = self.model_fpaa.to(torch.floataa )
a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.to("cpu" )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
a__ : Dict = self.model_fpaa.float()
def _UpperCamelCase( self : Dict ):
a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCamelCase( cls : str ):
a__ : Dict = "t5-small"
a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
a__ : int = AutoTokenizer.from_pretrained(cls.model_name )
a__ : str = "Translate in German: Hello, my dog is cute"
def _UpperCamelCase( self : Optional[int] ):
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Optional[int] ):
from transformers import TaForConditionalGeneration
a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
a__ : Optional[Any] = None
# test with `t5-small`
a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Any = model.generate(**lowerCamelCase__ )
a__ : Union[str, Any] = modules
def _UpperCamelCase( self : List[Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Optional[int] = model.generate(**lowerCamelCase__ )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : List[str] ):
super().setUp()
# model_name
a__ : Union[str, Any] = "bigscience/bloom-560m"
a__ : Union[str, Any] = "t5-small"
# Different types of model
a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Sequence classification model
a__ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# CausalLM model
a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Seq2seq model
a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Union[str, Any] ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
super().setUp()
def _UpperCamelCase( self : int ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Tuple ):
a__ : int = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a__ : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Tuple ):
super().setUp()
def _UpperCamelCase( self : List[Any] ):
a__ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Any = "facebook/opt-350m"
super().setUp()
def _UpperCamelCase( self : int ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a__ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a__ : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase__ ) ):
a__ : Dict = LoRALayer(module.q_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.k_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a__ : Optional[Any] = model.forward(**lowerCamelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( A__ ):
"""simple docstring"""
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
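The tests above exercise 4-bit loading through `bitsandbytes`; stripped to its essentials, the pattern under test is the following (a small checkpoint is used here purely for illustration):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m",
    quantization_config=quantization_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))
```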
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
# ModelTesterMixin / PipelineTesterMixin come from the test utilities imported at the
# top of the original file (imports not shown in this excerpt).
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            # (MODEL_MAPPING comes from the file's imports, not shown in this excerpt)
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
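# We will verify our results on an image of cute cats (the standard COCO test fixture
# used across the integration tests below).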
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 37 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
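# Summary of the layout conventions handled by the function below (see the individual
# branches for details):
#   - conv kernels: PyTorch stores (out, in, h, w); Flax expects (h, w, in, out), hence transpose(2, 3, 1, 0)
#   - linear weights: PyTorch stores (out, in); Flax expects (in, out), hence the transpose
#   - norm parameters: PyTorch "weight"/"gamma" map to Flax "scale"; "beta" maps to "bias"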
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
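# Minimal usage sketch (hypothetical names; assumes `flax_model` exposes `init_weights`
# and `pt_state_dict` was loaded with torch.load):
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=0)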
| 37 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf  # noqa: F811
from classy_vision.models.regnet import RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
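# ModuleTransfer (below) pairs the traced leaf modules of a source and a destination
# network, in forward-pass order, and copies weights pairwise via load_state_dict.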
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
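# FakeRegNetVisslWrapper (below) mimics what vissl does with a RegNet trunk (stem plus
# "res{i}" feature blocks, no classifier head), so its forward order matches the SEER
# checkpoints released by FAIR.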
class FakeRegNetVisslWrapper(nn.Module):
    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
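# Example invocation (hypothetical paths/model name; any key of `names_to_config` works):
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./converted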
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
_UpperCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 107 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark.run()
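# Example invocation (model name is a placeholder; flags mirror TensorFlowBenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128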
if __name__ == "__main__":
main()
| 37 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def precision_at_aa(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
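# precision_at_aa above implements retrieval precision@10: each mean-centered English
# sentence vector is ranked against all candidate vectors by cosine distance, and a hit
# is counted when the aligned candidate (same row index) appears in the 10 nearest neighbours.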
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCamelCase , lowerCamelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" ) | 108 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f"F1: {fa:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
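# evaluate_batch_eae decodes full answers for exact-match/F1 scoring, whereas
# evaluate_batch_retrieval returns tab-separated page titles for precision@k scoring.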
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 37 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
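# get_tfds wraps each split in a generator-backed tf.data.Dataset; assert_cardinality
# re-attaches the known split size so downstream code (e.g. the trainer's progress
# reporting) can see a finite length.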
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
| 37 | 0 |
"""simple docstring"""
def circle_sort(collection):
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[right], collection[left] = (
                    collection[left],
                    collection[right],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[right + 1], collection[left] = (
                collection[left],
                collection[right + 1],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
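# Example: circle_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]; the sort runs in place
# and the same list object is returned.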
if __name__ == "__main__":
UpperCamelCase__ = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
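    # Illustrative run (example only, not part of the original file): circle
    # sort orders ascending, so entering "5,3,1,4,2" should print [1, 2, 3, 4, 5].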
| 110 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
UpperCamelCase : Union[str, Any] = None
def UpperCamelCase_ ( ) -> List[str]:
a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def UpperCamelCase_ ( __a ) -> str:
a__ : Optional[Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__ : Dict = bool(qa["answers"]["text"] )
return qid_to_has_ans
def UpperCamelCase_ ( __a ) -> List[Any]:
def remove_articles(__a ):
return ARTICLES_REGEX.sub(" " , __a )
def white_space_fix(__a ):
return " ".join(text.split() )
def remove_punc(__a ):
a__ : Union[str, Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__a ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) )
def UpperCamelCase_ ( __a ) -> Dict:
if not s:
return []
return normalize_answer(__a ).split()
def UpperCamelCase_ ( __a , __a ) -> str:
return int(normalize_answer(__a ) == normalize_answer(__a ) )
def UpperCamelCase_ ( __a , __a ) -> Dict:
a__ : int = get_tokens(__a )
a__ : Optional[Any] = get_tokens(__a )
a__ : Any = collections.Counter(__a ) & collections.Counter(__a )
a__ : Dict = sum(common.values() )
if len(__a ) == 0 or len(__a ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
a__ : Tuple = 1.0 * num_same / len(__a )
a__ : str = 1.0 * num_same / len(__a )
a__ : str = (2 * precision * recall) / (precision + recall)
return fa
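# Worked example for the F1 above (illustrative, not part of the original
# script): gold "the cat sat" normalizes to ["cat", "sat"] after article
# removal, and prediction "cat sat down" to ["cat", "sat", "down"]; num_same = 2,
# so precision = 2/3, recall = 2/2 = 1.0 and F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.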
def UpperCamelCase_ ( __a , __a ) -> int:
a__ : List[str] = {}
a__ : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__ : List[Any] = qa["id"]
a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )]
if not gold_answers:
                    # For unanswerable questions, the only correct answer is the empty string
a__ : Tuple = [""]
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
a__ : Tuple = preds[qid]
# Take max over all gold answers
a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers )
a__ : str = max(compute_fa(__a , __a ) for a in gold_answers )
return exact_scores, fa_scores
def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]:
a__ : Optional[Any] = {}
for qid, s in scores.items():
a__ : Dict = na_probs[qid] > na_prob_thresh
if pred_na:
a__ : Dict = float(not qid_to_has_ans[qid] )
else:
a__ : Optional[Any] = s
return new_scores
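# Note on the threshold above (added for clarity): when na_probs[qid] exceeds
# na_prob_thresh the prediction is treated as "no answer", which scores 1.0
# exactly when the question truly has no answer; otherwise the original
# exact-match/F1 score is kept unchanged.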
def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple:
if not qid_list:
a__ : Union[str, Any] = len(__a )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
a__ : int = len(__a )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
for k in new_eval:
a__ : Optional[Any] = new_eval[k]
def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]:
plt.step(__a , __a , color="b" , alpha=0.2 , where="post" )
plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__a )
plt.savefig(__a )
plt.clf()
def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict:
a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] )
a__ : Any = 0.0
a__ : Optional[int] = 1.0
a__ : Optional[int] = 0.0
a__ : Any = [1.0]
a__ : Tuple = [0.0]
a__ : List[str] = 0.0
for i, qid in enumerate(__a ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
a__ : Any = true_pos / float(i + 1 )
a__ : int = true_pos / float(__a )
if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__a )
recalls.append(__a )
if out_image:
plot_pr_curve(__a , __a , __a , __a )
return {"ap": 100.0 * avg_prec}
def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str:
if out_image_dir and not os.path.exists(__a ):
os.makedirs(__a )
a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
a__ : Optional[int] = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
a__ : Optional[Any] = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()}
a__ : Optional[Any] = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(__a , __a , "pr_exact" )
merge_eval(__a , __a , "pr_f1" )
merge_eval(__a , __a , "pr_oracle" )
def UpperCamelCase_ ( __a , __a , __a , __a ) -> str:
if not qid_list:
return
a__ : Optional[Any] = [na_probs[k] for k in qid_list]
a__ : str = np.ones_like(__a ) / float(len(__a ) )
plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]:
a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
a__ : Optional[Any] = num_no_ans
a__ : Dict = cur_score
a__ : Any = 0.0
a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] )
for i, qid in enumerate(__a ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
a__ : Optional[int] = scores[qid]
else:
if preds[qid]:
a__ : str = -1
else:
a__ : Union[str, Any] = 0
cur_score += diff
if cur_score > best_score:
a__ : Any = cur_score
a__ : Dict = na_probs[qid]
return 100.0 * best_score / len(__a ), best_thresh
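# Note (added for clarity): the sweep above starts from the score of blanking
# every prediction (one point per truly unanswerable question) and re-admits
# answers in increasing no-answer-probability order, keeping the threshold
# that maximizes the cumulative score.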
def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any:
a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a )
a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a )
a__ : Any = best_exact
a__ : Any = exact_thresh
a__ : List[Any] = best_fa
a__ : Optional[int] = fa_thresh
def UpperCamelCase_ ( ) -> Tuple:
with open(OPTS.data_file ) as f:
a__ : List[Any] = json.load(__a )
a__ : Any = dataset_json["data"]
with open(OPTS.pred_file ) as f:
a__ : int = json.load(__a )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
a__ : List[str] = json.load(__a )
else:
a__ : Optional[int] = {k: 0.0 for k in preds}
a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False
a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v]
a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v]
a__, a__ : str = get_raw_scores(__a , __a )
a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh )
a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh )
a__ : Tuple = make_eval_dict(__a , __a )
if has_ans_qids:
a__ : str = make_eval_dict(__a , __a , qid_list=__a )
merge_eval(__a , __a , "HasAns" )
if no_ans_qids:
a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a )
merge_eval(__a , __a , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(__a , __a , __a , __a , __a , __a )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir )
histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(__a , __a )
else:
print(json.dumps(__a , indent=2 ) )
if __name__ == "__main__":
UpperCamelCase : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 37 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Optional[int] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__lowerCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=lowerCamelCase__ , cache_dir=lowerCamelCase__ )
__lowerCAmelCase = [t[-1] for t in os.walk(os.path.join(lowerCamelCase__ , os.listdir(lowerCamelCase__ )[0] , 'snapshots' ) )]
__lowerCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Any:
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=lowerCamelCase__ )
__lowerCAmelCase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 4
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
__lowerCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase__ ) == num_samples
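        # Note (added for clarity): the pattern above is standard JAX data
        # parallelism: replicate() copies the pipeline params to every device,
        # jax.random.split() produces one PRNG key per device, and shard()
        # reshapes the batch to (num_devices, per_device_batch, ...) so the
        # jitted pipeline call runs once per device via pmap.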
def lowercase ( self : int ) -> Tuple:
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=lowerCamelCase__ )
__lowerCAmelCase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 5_0
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def lowercase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ )
__lowerCAmelCase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 5_0
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def lowercase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
__lowerCAmelCase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 5_0
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def lowercase ( self : List[Any] ) -> List[str]:
__lowerCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , )
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , )
__lowerCAmelCase = scheduler.create_state()
__lowerCAmelCase = scheduler_state
__lowerCAmelCase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 5_0
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def lowercase ( self : int ) -> List[str]:
__lowerCAmelCase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , lowerCamelCase__ )
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ , )
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ , use_memory_efficient_attention=lowerCamelCase__ , )
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 53 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = CLIPTokenizer
_lowercase = CLIPTokenizerFast
_lowercase = True
_lowercase = {}
_lowercase = False
def _UpperCamelCase( self : List[Any] ):
super().setUp()
# fmt: off
a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a__ : Optional[Any] = {"unk_token": "<unk>"}
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ):
a__ : int = "lower newer"
a__ : Optional[int] = "lower newer"
return input_text, output_text
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : int = "lower newer"
a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : int = tokens + [tokenizer.unk_token]
a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@require_ftfy
def _UpperCamelCase( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y"
a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
        # Test that the tokenization is identical on Unicode space characters
a__ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
        # Test that the tokenization is identical on Unicode line-break characters
a__ : Union[str, Any] = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}'''
a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
a__ : Optional[Any] = f''' {text}'''
a__ : str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
def _UpperCamelCase( self : int ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCamelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def _UpperCamelCase( self : int ):
super().test_tokenization_python_rust_equals()
def _UpperCamelCase( self : str ):
# CLIP always lower cases letters
pass
| 37 | 0 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__="" ) -> str:
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
return os.path.join(__a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
UpperCAmelCase = AgentAudio(lowerCamelCase__ )
UpperCAmelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
UpperCAmelCase = sf.read(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , torch.tensor(lowerCamelCase__ ) , atol=1E-4 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
UpperCAmelCase = get_new_path(suffix='''.wav''' )
sf.write(lowerCamelCase__ , lowerCamelCase__ , 1_6_0_0_0 )
UpperCAmelCase = AgentAudio(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowerCamelCase__ )
@require_vision
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
UpperCAmelCase = AgentImage(lowerCamelCase__ )
UpperCAmelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / "000000039769.png"
UpperCAmelCase = Image.open(lowerCamelCase__ )
UpperCAmelCase = AgentImage(lowerCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / "000000039769.png"
UpperCAmelCase = Image.open(lowerCamelCase__ )
UpperCAmelCase = AgentImage(lowerCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = "Hey!"
UpperCAmelCase = AgentText(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , agent_type.to_string() )
self.assertEqual(lowerCamelCase__ , agent_type.to_raw() )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 130 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """
UpperCamelCase : List[Any] = """=======
>>>>>>>
"""
UpperCamelCase : Optional[Any] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
UpperCamelCase : Any = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
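# Illustrative effect of the substitutions above (example only): the line
# "tfds.features.Text()" becomes "datasets.Value('string')", and
# "tf.io.gfile.GFile(path)" becomes "open(path)".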
def UpperCamelCase_ ( __a ) -> Optional[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A__ ( A__ ):
"""simple docstring"""
@staticmethod
def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ):
a__ : List[str] = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ):
a__ : str = get_logger("datasets-cli/converting" )
a__ : Optional[Any] = tfds_path
a__ : Optional[int] = datasets_directory
def _UpperCamelCase( self : int ):
if os.path.isdir(self._tfds_path ):
a__ : List[str] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
a__ : Any = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
a__ : Dict = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
a__ : Tuple = []
a__ : str = []
a__ : List[Any] = {}
if os.path.isdir(self._tfds_path ):
a__ : List[str] = os.listdir(lowerCamelCase__ )
else:
a__ : Union[str, Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(lowerCamelCase__ , encoding="utf-8" ) as f:
a__ : List[Any] = f.readlines()
a__ : Union[str, Any] = []
a__ : Union[str, Any] = False
a__ : Union[str, Any] = False
a__ : Dict = []
for line in lines:
a__ : Optional[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a__ : List[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
a__ : List[str] = ""
continue
elif "from absl import logging" in out_line:
a__ : Dict = "from datasets import logging\n"
elif "getLogger" in out_line:
a__ : List[Any] = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
a__ : List[str] = True
a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" )
out_lines.append(lowerCamelCase__ )
out_lines.append(lowerCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                    # Take care of saving utilities (to later move them together with the main script)
if "tensorflow_datasets" in out_line:
a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
a__ : Optional[Any] = "from . import " + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a__ : Optional[int] = True
out_lines.append(lowerCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a__ : Dict = f_name.replace(".py" , "" )
a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase__ )
if needs_manual_update:
with_manual_update.append(lowerCamelCase__ )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.writelines(lowerCamelCase__ )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
a__ : Any = os.path.basename(lowerCamelCase__ )
a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(lowerCamelCase__ , lowerCamelCase__ )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 37 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : int = logging.get_logger(__name__)
class a_ ( A__ ):
UpperCamelCase_ : int = ["pixel_values"]
def __init__( self : int , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : int = 0.9 , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : Union[int, float] = 1 / 255 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , **snake_case__ : Any , ):
super().__init__(**lowerCamelCase__ )
lowerCAmelCase__ = size if size is not None else {"shortest_edge": 224}
lowerCAmelCase__ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCAmelCase__ = get_size_dict(lowerCamelCase__ , param_name="""crop_size""" )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = crop_pct
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : Optional[float] = None , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : str , ):
lowerCAmelCase__ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"""size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCAmelCase__ = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCAmelCase__ = int(size["""height"""] / crop_pct )
else:
lowerCAmelCase__ = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowerCamelCase__ ) )
lowerCAmelCase__ = get_resize_output_image_size(lowerCamelCase__ , size=lowerCamelCase__ , default_to_square=lowerCamelCase__ )
else:
if "shortest_edge" in size:
lowerCAmelCase__ = get_resize_output_image_size(lowerCamelCase__ , size=size["""shortest_edge"""] , default_to_square=lowerCamelCase__ )
elif "height" in size and "width" in size:
lowerCAmelCase__ = (size["height"], size["width"])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowerCamelCase__ ) )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
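        # Worked example (added for clarity): with size={"shortest_edge": 224}
        # and crop_pct=0.9, the image is resized so its shortest edge becomes
        # int(224 / 0.9) == 248; a subsequent 224x224 center crop then keeps
        # roughly crop_pct of each resized dimension.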
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Any , ):
lowerCAmelCase__ = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""size must contain \'height\' and \'width\' as keys. Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : np.ndarray , snake_case__ : Union[int, float] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Tuple , ):
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : np.ndarray , snake_case__ : Union[float, List[float]] , snake_case__ : Union[float, List[float]] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Any , ):
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : ImageInput , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : int = None , snake_case__ : PILImageResampling = None , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : bool = None , snake_case__ : float = None , snake_case__ : bool = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : str , ):
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(lowerCamelCase__ , param_name="""crop_size""" )
lowerCAmelCase__ = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , crop_pct=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
lowerCAmelCase__ = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
| 644 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class A__ ( A__ ):
"""simple docstring"""
_lowercase = ''
_lowercase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowercase = None # compression type in fsspec. ex: "gzip"
_lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ):
super().__init__(self , **lowerCamelCase__ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
a__ : str = fsspec.open(
lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] )
a__ : int = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
a__ : List[Any] = None
@classmethod
def _UpperCamelCase( cls : int , lowerCamelCase__ : int ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" )
def _UpperCamelCase( self : Dict ):
if self.dir_cache is None:
a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
a__ : int = {f["name"]: f}
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ):
return self.file.open().read()
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ):
a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'bz2'
_lowercase = 'bz2'
_lowercase = '.bz2'
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'gzip'
_lowercase = 'gzip'
_lowercase = '.gz'
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'lz4'
_lowercase = 'lz4'
_lowercase = '.lz4'
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'xz'
_lowercase = 'xz'
_lowercase = '.xz'
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'zstd'
_lowercase = 'zstd'
_lowercase = '.zst'
def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ):
super().__init__(
fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
a__ : Any = self.file.__enter__
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : str ):
a__ : List[Any] = file_
def __enter__( self : str ):
self._file.__enter__()
return self
def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ):
self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ )
def __iter__( self : List[str] ):
return iter(self._file )
def _UpperCamelCase( self : Any ):
return next(self._file )
def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ):
return getattr(self._file , lowerCamelCase__ )
def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ):
return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) )
a__ : Any = fixed_enter
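# Illustrative usage (assumption: these filesystems are registered with
# fsspec under their `protocol` names, as the `datasets` library does):
# fsspec.open("gzip://file.txt::https://foo.bar/file.txt.gz") streams and
# decompresses the remote archive using the chained-URL form shown above.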
| 37 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_lowerCAmelCase :int = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A=1_6 , A=1_3 , A=7 , A=1_4 , A=1_0 , A=1_9 , A=5 , A=4 , A=True , A=1_6 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=[1, 2, 3, 4, 5] , A=2_5 , A=5 , ) -> Any:
_UpperCAmelCase : Optional[Any] = d_model
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : str = batch_size
_UpperCAmelCase : List[Any] = prediction_length
_UpperCAmelCase : List[Any] = context_length
_UpperCAmelCase : str = cardinality
_UpperCAmelCase : List[Any] = num_time_features
_UpperCAmelCase : Dict = lags_sequence
_UpperCAmelCase : int = embedding_dimension
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : str = num_attention_heads
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = context_length
_UpperCAmelCase : Any = prediction_length + label_length
_UpperCAmelCase : List[Any] = label_length
_UpperCAmelCase : int = moving_average
_UpperCAmelCase : Any = autocorrelation_factor
def __lowerCAmelCase ( self ) -> Any:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __lowerCAmelCase ( self , A ) -> List[Any]:
_UpperCAmelCase : str = config.context_length + max(config.lags_sequence )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, _past_length] )
_UpperCAmelCase : Any = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] )
_UpperCAmelCase : str = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = self.get_config()
_UpperCAmelCase : Any = self.prepare_autoformer_inputs_dict(lowerCamelCase__ )
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self , A , A ) -> int:
_UpperCAmelCase : Union[str, Any] = AutoformerModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
_UpperCAmelCase : int = outputs.encoder_last_hidden_state
_UpperCAmelCase : Optional[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : str = model.get_encoder()
encoder.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = AutoformerEncoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model.create_network_inputs(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_UpperCAmelCase : str = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCAmelCase : Optional[int] = encoder(inputs_embeds=lowerCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_UpperCAmelCase : Tuple = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCAmelCase : Tuple = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
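        # Note (added for clarity): the decoder is seeded with the tail of the
        # decomposed context -- the last `label_length` seasonal values padded
        # with zeros over the prediction horizon, and the last `label_length`
        # trend values padded with the mean of the context window.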
_UpperCAmelCase : Dict = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCAmelCase : Union[str, Any] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Any = model.get_decoder()
decoder.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = AutoformerDecoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = decoder(
trend=lowerCamelCase__ , inputs_embeds=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
'''simple docstring'''
a__ =(AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a__ =(AutoformerForPrediction,) if is_torch_available() else ()
a__ ={'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
a__ =False
a__ =False
a__ =False
a__ =False
a__ =False
a__ =False
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : List[Any] = AutoformerModelTester(self )
_UpperCAmelCase : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase : Any = model_class(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model_class.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertEqual(info['''missing_keys'''] , [] )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase__ )
    @unittest.skip(reason='''Model has no token embeddings''' )
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : str = inspect.signature(getattr(lowerCamelCase__ , '''forward''' ) )
# The main input is the name of the argument after `self`
_UpperCAmelCase : int = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCamelCase__ )
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
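# Minimal usage sketch outside the test harness (the checkpoint name and batch keys
# come from the tests above; `prepare_batch` is the helper defined in this file):
#
#     from transformers import AutoformerForPrediction
#
#     model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#     batch = prepare_batch("val-batch.pt")
#     outputs = model.generate(
#         past_values=batch["past_values"],
#         past_time_features=batch["past_time_features"],
#         past_observed_mask=batch["past_observed_mask"],
#         static_categorical_features=batch["static_categorical_features"],
#         future_time_features=batch["future_time_features"],
#     )
#     point_forecast = outputs.sequences.mean(dim=1)  # average over the parallel samples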
| 506 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel PyTorch model instance to be converted
        ckpt_dir: TensorFlow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            # dense/attention weights are stored transposed in the TF1 checkpoint format
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
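# Example invocation (the script filename and the paths are placeholders — they are
# not pinned by this file; substitute your own):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin \
#       --tf_cache_dir /path/to/tf-checkpoints
#
# The resulting .ckpt can then be consumed by the original TF1 BERT code
# (e.g. via tf.train.init_from_checkpoint).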
| 37 | 0 |
def prefix_function(input_string: str) -> list:
    """KMP prefix function: result[i] is the length of the longest proper prefix
    of input_string[: i + 1] that is also a suffix of it."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest prefix that also occurs as a suffix of some prefix of the string."""
    return max(prefix_function(input_string))
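# Worked example (values checked by hand against the definition above):
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") -> 4   # "aabc" is both a prefix and a suffix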
if __name__ == "__main__":
import doctest
doctest.testmod()
| 131 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    file = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(file)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
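# Minimal usage sketch outside the test harness (the checkpoint name comes from
# the test above; the audio file "clip.flac" is an illustrative assumption —
# any mono clip at the extractor's expected sampling rate works):
#
#     import torchaudio
#     from transformers import ASTFeatureExtractor, ASTForAudioClassification
#
#     extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#     model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#     waveform, sr = torchaudio.load("clip.flac")
#     inputs = extractor(waveform.squeeze().numpy(), sampling_rate=sr, return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])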
| 37 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
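# Minimal usage sketch for the pipelines exported above (the checkpoint name
# "kakaobrain/karlo-v1-alpha" is the public UnCLIP checkpoint — an assumption,
# since this __init__ does not pin any checkpoint):
#
#     import torch
#     from diffusers import UnCLIPPipeline
#
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#     image = pipe("a photo of an astronaut riding a horse").images[0]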
| 77 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
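# Minimal usage sketch (checkpoint name and expected ids are taken from the tests above):
#
#     from transformers import XGLMTokenizer
#
#     tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tok.encode("Hello World!")   # -> [2, 31227, 4447, 35], per test_tokenization_base_easy_symbols
#     print(tok.decode(ids))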
| 37 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

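# Minimal usage sketch of the config class above (MobileNetV1Model ships in the same
# transformers release as this config; the 0.75/192 values mirror the
# "mobilenet_v1_0.75_192" checkpoint referenced in the archive map):
#
#     from transformers import MobileNetV1Config, MobileNetV1Model
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     model = MobileNetV1Model(config)  # randomly initialised weights
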
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 498 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32_001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32_001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
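# Example invocation (the script filename is a placeholder — substitute the actual
# name of this conversion script in your checkout):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl
#
# Add --push_to_hub to also upload the converted model and processor.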
| 37 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[str] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowercase_ ( self ):
'''simple docstring'''
Accelerator()
__UpperCAmelCase: List[Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowerCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 523 |
def binomial_coefficient(n: int, r: int) -> int:
    # Compute nCr row by row (Pascal's triangle) using O(r) extra space.
    c = [0 for _ in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
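# Quick sanity check (illustrative addition, not part of the original file):
# the result should agree with the standard library's math.comb.
from math import comb

assert binomial_coefficient(10, 5) == comb(10, 5) == 252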
| 37 | 0 |
"""Project Euler problem 28: sum of the numbers on the diagonals of an n x n spiral."""
from math import ceil


def solution(n: int = 1001) -> int:
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 262 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                setattr(self.backend_tokenizer, tokenizer_component, component_class(**state))

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = encoded_inputs["global_attention_mask"] + [-1] * difference
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs["global_attention_mask"]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
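# Illustrative usage (an assumption based on the standard transformers API, not
# part of the original file). LED distinguishes local from global attention, so
# callers typically add a `global_attention_mask` themselves; `_pad` above then
# pads it with -1 alongside `input_ids`:
#
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer("A long document ...")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   padded = tokenizer.pad(enc, padding="max_length", max_length=32)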
| 37 | 0 |
"""Evaluation script for RAG models: computes e2e (EM/F1) or retrieval (precision@k) metrics."""
import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART tends to repeat BOS tokens; don't allow more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string."
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
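# Example invocation (illustrative; the script name and all paths below are
# placeholders, not taken from the original file):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --predictions_path preds.txt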
| 208 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                setattr(self.backend_tokenizer, tokenizer_component, component_class(**state))

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
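# Illustrative usage (an assumption based on the standard transformers API, not
# part of the original file): pretokenized input requires add_prefix_space=True,
# otherwise the asserts above fail.
#
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
#   enc = tok(["Hello", "world"], is_split_into_words=True)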
| 37 | 0 |
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
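if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition; the coefficients are made up,
    # not derived from any particular filter design method): feed samples one at
    # a time and collect the filtered output.
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -0.5, 0.1], [0.2, 0.4, 0.2])
    print([round(filt.process(x), 4) for x in [1.0] * 8])  # step response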
| 423 |
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
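if __name__ == "__main__":
    # Quick demonstration (illustrative addition): min-max scaling maps the data
    # onto [0, 1]; standardization gives zero mean and unit sample standard
    # deviation.
    print(normalization([2.0, 4.0, 6.0, 8.0]))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization([2.0, 4.0, 6.0, 8.0]))  # [-1.162, -0.387, 0.387, 1.162]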
| 37 | 0 |
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts sequence[start..end] in place (the deliberately inefficient "slowsort")."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
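    # In-place usage (illustrative addition):
    seq = [5, 2, 9, 1]
    slowsort(seq)
    print(seq)  # [1, 2, 5, 9]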
| 53 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
| 37 | 0 |
import unittest

from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase,  # the big dict on the preceding line keeps its original name
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 130 |
class Things:
    def __init__(self, name: str, value: float, weight: float):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Greedy 0/1 knapsack heuristic: take items in key_func order while they fit.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    # Placeholder kept from the original file.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
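    # Illustrative run of the greedy heuristic (an added example, not part of
    # the original file): pick items by value until the weight budget is spent.
    foods = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 10, 10])
    taken, total_value = greedy(foods, 25, Things.get_value)
    print(taken, total_value)  # [Things(Pizza, 100, 10), Things(Coca Cola, 60, 10)] 160.0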
| 37 | 0 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
| 644 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
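# Illustrative round trip through the public `datasets` API that wraps these
# classes (an added sketch, assuming the standard Dataset.to_sql / from_sql
# methods; not part of the original file):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ds.to_sql("my_table", "sqlite:///data.db")                      # SqlDatasetWriter
#   ds2 = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///data.db")  # SqlDatasetReader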
| 37 | 0 |
"""simple docstring"""
from typing import Any
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A ) -> Union[str, Any]:
_UpperCAmelCase : List[str] = data
_UpperCAmelCase : List[Any] = None
def __repr__( self ) -> str:
return f'Node({self.data})'
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self ) -> Union[str, Any]:
_UpperCAmelCase : Union[str, Any] = None
def __iter__( self ) -> Optional[Any]:
_UpperCAmelCase : List[str] = self.head
while node:
yield node.data
_UpperCAmelCase : List[Any] = node.next
def __len__( self ) -> Dict:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __getitem__( self , A ) -> int:
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , A , A ) -> Optional[int]:
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
_UpperCAmelCase : int = self.head
for _ in range(lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = current.next
_UpperCAmelCase : Optional[int] = data
def __lowerCAmelCase ( self , A ) -> Dict:
self.insert_nth(len(self ) , lowerCamelCase__ )
def __lowerCAmelCase ( self , A ) -> List[Any]:
self.insert_nth(0 , lowerCamelCase__ )
def __lowerCAmelCase ( self , A , A ) -> List[str]:
if not 0 <= index <= len(self ):
raise IndexError('''list index out of range''' )
_UpperCAmelCase : Union[str, Any] = Node(lowerCamelCase__ )
if self.head is None:
_UpperCAmelCase : List[Any] = new_node
elif index == 0:
_UpperCAmelCase : str = self.head # link new_node to head
_UpperCAmelCase : int = new_node
else:
_UpperCAmelCase : List[Any] = self.head
for _ in range(index - 1 ):
_UpperCAmelCase : Optional[int] = temp.next
_UpperCAmelCase : Optional[int] = temp.next
_UpperCAmelCase : Dict = new_node
def __lowerCAmelCase ( self ) -> Any: # print every node data
print(self )
def __lowerCAmelCase ( self ) -> Any:
return self.delete_nth(0 )
def __lowerCAmelCase ( self ) -> str: # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowerCAmelCase ( self , A = 0 ) -> Optional[int]:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('''List index out of range.''' )
_UpperCAmelCase : Optional[int] = self.head # default first node
if index == 0:
_UpperCAmelCase : Tuple = self.head.next
else:
_UpperCAmelCase : List[Any] = self.head
for _ in range(index - 1 ):
_UpperCAmelCase : int = temp.next
_UpperCAmelCase : Tuple = temp.next
_UpperCAmelCase : int = temp.next.next
return delete_node.data
def __lowerCAmelCase ( self ) -> Optional[int]:
return self.head is None
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[Any] = self.head
while current:
# Store the current node's next node.
_UpperCAmelCase : Tuple = current.next
# Make the current node's next point backwards
_UpperCAmelCase : int = prev
# Make the previous node be the current node
_UpperCAmelCase : Optional[Any] = current
# Make the current node the next node (to progress iteration)
_UpperCAmelCase : Tuple = next_node
# Return prev in order to put the head at the end
_UpperCAmelCase : Optional[Any] = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section of the test uses varying data types as input.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
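
    # Added usage sketch (not part of the original file): build a short list
    # and reverse it in place; `reverse` runs in O(n) time with O(1) extra space.
    demo = LinkedList()
    for value in (1, 2, 3):
        demo.insert_tail(value)
    assert str(demo) == "1->2->3"
    demo.reverse()
    assert str(demo) == "3->2->1"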
| 506 |
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """
    Calculate the Gregorian Easter date for a given year with Gauss' computus.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 37 | 0 |
'''simple docstring'''


class EditDistance:
    """
    Dynamic-programming edit distance, in top-down and bottom-up variants.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 131 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used only by these tests."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
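

# Added usage note (an illustration, not taken from the tests): LoRALayer is a
# LoRA-style wrapper, so wrapping a Linear layer adds a trainable low-rank path
# on top of the frozen base output, e.g.:
#   lora = LoRALayer(nn.Linear(16, 16), rank=4)
#   y = lora(torch.randn(2, 16))   # base(x) + adapter(x)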
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"

    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")

    MAX_NEW_TOKENS = 10
def _UpperCamelCase( self : Dict ):
# Models and tokenizer
a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Union[str, Any] ):
super().setUp()
# Models and tokenizer
a__ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : List[Any] ):
a__ : str = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) )
a__ : Optional[Any] = config.to_dict()
a__ : int = config.to_diff_dict()
a__ : List[str] = config.to_json_string()
def _UpperCamelCase( self : int ):
from bitsandbytes.nn import Paramsabit
a__ : List[Any] = self.model_fpaa.get_memory_footprint()
a__ : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a__ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCamelCase( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCamelCase( self : str ):
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[Any] = BitsAndBytesConfig()
a__ : Optional[int] = True
a__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" )
a__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : Dict ):
with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : int = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase__ ):
a__ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _UpperCamelCase( self : int ):
with self.assertRaises(lowerCamelCase__ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a__ : int = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Any = self.model_fpaa.to(torch.floataa )
a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.to("cpu" )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
a__ : Dict = self.model_fpaa.float()
def _UpperCamelCase( self : Dict ):
a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCamelCase( cls : str ):
a__ : Dict = "t5-small"
a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
a__ : int = AutoTokenizer.from_pretrained(cls.model_name )
a__ : str = "Translate in German: Hello, my dog is cute"
def _UpperCamelCase( self : Optional[int] ):
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Optional[int] ):
from transformers import TaForConditionalGeneration
a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
a__ : Optional[Any] = None
# test with `t5-small`
a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Any = model.generate(**lowerCamelCase__ )
a__ : Union[str, Any] = modules
def _UpperCamelCase( self : List[Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Optional[int] = model.generate(**lowerCamelCase__ )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : List[str] ):
super().setUp()
# model_name
a__ : Union[str, Any] = "bigscience/bloom-560m"
a__ : Union[str, Any] = "t5-small"
# Different types of model
a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Sequence classification model
a__ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# CausalLM model
a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Seq2seq model
a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Union[str, Any] ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
super().setUp()
def _UpperCamelCase( self : int ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Tuple ):
a__ : int = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a__ : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Tuple ):
super().setUp()
def _UpperCamelCase( self : List[Any] ):
a__ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Any = "facebook/opt-350m"
super().setUp()
def _UpperCamelCase( self : int ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a__ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a__ : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase__ ) ):
a__ : Dict = LoRALayer(module.q_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.k_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a__ : Optional[Any] = model.forward(**lowerCamelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( A__ ):
"""simple docstring"""
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
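

# Added sketch (not part of the test suite above): the minimal 4-bit loading
# path these tests exercise. The checkpoint name comes from the tests; the
# prompt and generation settings are illustrative assumptions. Running this
# needs a CUDA GPU with `bitsandbytes` installed.
if __name__ == "__main__":
    quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
    model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
    )
    inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
    outputs = model.generate(**inputs, max_new_tokens=10)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))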
| 37 | 0 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
A = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
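

# Added wiring sketch (not part of the original utilities): how the pieces
# above fit together. The jsonl path and tokenizer checkpoint are assumptions.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    labels = get_mmimdb_labels()
    dataset = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(), labels, 512)
    loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
    print(f"{len(dataset)} examples across {len(labels)} genres")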
| 77 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ):
a__ : Dict = parent
a__ : Dict = 100
a__ : Optional[int] = batch_size
a__ : Union[str, Any] = image_size
a__ : Any = patch_size
a__ : Optional[Any] = num_channels
a__ : int = is_training
a__ : List[str] = use_labels
a__ : Optional[Any] = hidden_size
a__ : List[Any] = num_hidden_layers
a__ : str = num_attention_heads
a__ : str = intermediate_size
a__ : int = hidden_act
a__ : List[Any] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Union[str, Any] = type_sequence_label_size
a__ : Optional[Any] = initializer_range
a__ : List[str] = scope
a__ : int = out_indices
a__ : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Optional[int] = (image_size // patch_size) ** 2
a__ : Union[str, Any] = num_patches + 1
def _UpperCamelCase( self : int ):
a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Optional[Any] = None
a__ : Tuple = None
if self.use_labels:
a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCamelCase( self : Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
a__ : str = BeitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ):
a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ):
a__ : List[str] = self.type_sequence_label_size
a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a__ : Optional[Any] = 1
a__ : List[str] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ):
a__ : int = self.num_labels
a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _UpperCamelCase( self : Optional[int] ):
a__ : Any = self.prepare_config_and_inputs()
a__, a__, a__, a__ : Union[str, Any] = config_and_inputs
a__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _UpperCamelCase( self : Any ):
a__ : int = BeitModelTester(self )
a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _UpperCamelCase( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _UpperCamelCase( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase( self : Dict ):
pass
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def _UpperCamelCase( self : str ):
a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = model_class(lowerCamelCase__ )
a__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
if not self.model_tester.is_training:
return
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]:
continue
a__ : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : Tuple = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : Tuple ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ : List[Any] = False
a__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
a__ : Optional[Any] = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : int = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : List[str] ):
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Dict = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
a__ : str = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _UpperCamelCase( self : Optional[int] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _UpperCamelCase( self : str ):
a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ )
a__ : Optional[Any] = self.default_image_processor
a__ : Dict = prepare_img()
a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ )
# prepare bool_masked_pos
a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ )
a__ : Tuple = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) )
@slow
def _UpperCamelCase( self : Dict ):
a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ )
a__ : int = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Union[str, Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : Any ):
a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
lowerCamelCase__ )
a__ : str = self.default_image_processor
a__ : List[str] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Dict = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Optional[int] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Optional[Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : Tuple = model.to(lowerCamelCase__ )
a__ : List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : Union[str, Any] = Image.open(ds[0]["file"] )
a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Tuple = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
a__ : Dict = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowerCamelCase__ , )
else:
a__ : Dict = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def _UpperCamelCase( self : Tuple ):
a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : List[Any] = model.to(lowerCamelCase__ )
a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : str = Image.open(ds[0]["file"] )
a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : List[Any] = model(**lowerCamelCase__ )
a__ : Any = outputs.logits.detach().cpu()
a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] )
a__ : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
a__ : Any = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
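

# Added inference sketch (not part of the test suite above), mirroring the
# image-classification integration test; the checkpoint name is taken from it.
if __name__ == "__main__":
    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])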
| 37 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _SCREAMING_SNAKE_CASE:
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=13 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=24 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=5 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=37 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=10 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=2 ,) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = parent
__SCREAMING_SNAKE_CASE :Any = batch_size
__SCREAMING_SNAKE_CASE :Dict = patch_size
__SCREAMING_SNAKE_CASE :List[Any] = max_length
__SCREAMING_SNAKE_CASE :str = num_mel_bins
__SCREAMING_SNAKE_CASE :Optional[Any] = is_training
__SCREAMING_SNAKE_CASE :Optional[int] = use_labels
__SCREAMING_SNAKE_CASE :List[Any] = hidden_size
__SCREAMING_SNAKE_CASE :str = num_hidden_layers
__SCREAMING_SNAKE_CASE :Any = num_attention_heads
__SCREAMING_SNAKE_CASE :Union[str, Any] = intermediate_size
__SCREAMING_SNAKE_CASE :List[str] = hidden_act
__SCREAMING_SNAKE_CASE :str = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :Tuple = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :List[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE :Any = initializer_range
__SCREAMING_SNAKE_CASE :str = scope
__SCREAMING_SNAKE_CASE :List[str] = frequency_stride
__SCREAMING_SNAKE_CASE :Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__SCREAMING_SNAKE_CASE :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__SCREAMING_SNAKE_CASE :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
__SCREAMING_SNAKE_CASE :Tuple = frequency_out_dimension * time_out_dimension
__SCREAMING_SNAKE_CASE :List[str] = num_patches + 2
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__SCREAMING_SNAKE_CASE :List[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE :str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE :List[str] = self.get_config()
return config, input_values, labels
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE :Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.prepare_config_and_inputs()
(
__SCREAMING_SNAKE_CASE
) :Optional[int] = config_and_inputs
__SCREAMING_SNAKE_CASE :List[Any] = {"input_values": input_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE( A__ , A__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = ASTModelTester(self )
__SCREAMING_SNAKE_CASE :Any = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :Any = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__SCREAMING_SNAKE_CASE :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :Dict = model_class(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE :Optional[int] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE :Optional[Any] = ["input_values"]
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
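

# Added inference sketch (not part of the test suite): end-to-end audio
# classification with the checkpoint used by the integration test below.
# Wrapped in a function so importing this module stays side-effect free.
def _demo_ast_inference():
    feature_extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    audio, sampling_rate = prepare_audio()
    inputs = feature_extractor(audio.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])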
@require_torch
@require_torchaudio
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self.default_feature_extractor
__SCREAMING_SNAKE_CASE :Optional[Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Any = self.default_feature_extractor
__SCREAMING_SNAKE_CASE :Dict = prepare_audio()
__SCREAMING_SNAKE_CASE :str = audio.squeeze().numpy()
__SCREAMING_SNAKE_CASE :Any = feature_extractor(lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE :Any = model(**lowerCamelCase__ )
# verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 498 |
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
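

# Added sanity check (not part of the module above): `rename_key` turns each
# "<name>.<digit>" segment into "<name>_<digit>", which matches how Flax names
# numbered PyTorch submodules.
assert rename_key("down_blocks.1.attentions.0.norm.weight") == "down_blocks_1.attentions_0.norm.weight"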
| 37 | 0 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=24 , snake_case_=2 , snake_case_=6 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.0_2 , snake_case_=3 , snake_case_=None , snake_case_=1000 , ):
'''simple docstring'''
__UpperCAmelCase: int = parent
__UpperCAmelCase: Optional[int] = batch_size
__UpperCAmelCase: Optional[Any] = seq_length
__UpperCAmelCase: Tuple = is_training
__UpperCAmelCase: Union[str, Any] = use_input_mask
__UpperCAmelCase: str = use_token_type_ids
__UpperCAmelCase: Union[str, Any] = use_labels
__UpperCAmelCase: Tuple = vocab_size
__UpperCAmelCase: Any = hidden_size
__UpperCAmelCase: Tuple = num_hidden_layers
__UpperCAmelCase: Union[str, Any] = num_attention_heads
__UpperCAmelCase: Union[str, Any] = intermediate_size
__UpperCAmelCase: Any = hidden_act
__UpperCAmelCase: Dict = hidden_dropout_prob
__UpperCAmelCase: Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase: Optional[int] = max_position_embeddings
__UpperCAmelCase: Any = type_vocab_size
__UpperCAmelCase: str = type_sequence_label_size
__UpperCAmelCase: Union[str, Any] = initializer_range
__UpperCAmelCase: Optional[Any] = num_labels
__UpperCAmelCase: List[Any] = scope
__UpperCAmelCase: List[Any] = range_bbox
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase: str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__UpperCAmelCase: List[Any] = bbox[i, j, 3]
__UpperCAmelCase: int = bbox[i, j, 1]
__UpperCAmelCase: int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__UpperCAmelCase: str = bbox[i, j, 2]
__UpperCAmelCase: str = bbox[i, j, 0]
__UpperCAmelCase: Dict = t
__UpperCAmelCase: List[Any] = None
if self.use_input_mask:
__UpperCAmelCase: Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__UpperCAmelCase: List[str] = None
if self.use_token_type_ids:
__UpperCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase: int = None
__UpperCAmelCase: Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase: List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase: List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = LiltModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase: Any = model(lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__UpperCAmelCase: Union[str, Any] = model(lowerCamelCase__ , bbox=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__UpperCAmelCase: Any = model(lowerCamelCase__ , bbox=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
'''simple docstring'''
__UpperCAmelCase: Any = self.num_labels
__UpperCAmelCase: Dict = LiltForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase: Any = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = LiltForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCAmelCase: str = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = self.prepare_config_and_inputs()
(
__UpperCAmelCase
): List[str] = config_and_inputs
__UpperCAmelCase: Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class a ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
return True
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = LiltModelTester(self )
__UpperCAmelCase: Tuple = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase: Union[str, Any] = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase: Optional[Any] = LiltModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: int = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase__ )
__UpperCAmelCase: Union[str, Any] = torch.tensor([[1, 2]] , device=lowerCamelCase__ )
__UpperCAmelCase: Optional[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase: Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ )
__UpperCAmelCase: Any = torch.Size([1, 2, 768] )
__UpperCAmelCase: Any = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase__ , )
self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase__ , atol=1e-3 ) ) | 523 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase_ ( ) -> int:
a__ : Any = HfArgumentParser(__a )
a__ : Any = parser.parse_args_into_dataclasses()[0]
a__ : Optional[int] = TensorFlowBenchmark(args=__a )
try:
a__ : Optional[int] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a__ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead."
a__ : List[Any] = " ".join(str(__a ).split(" " )[:-1] )
a__ : str = ""
a__ : List[Any] = eval(str(__a ).split(" " )[-1] )
a__ : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__a )
if len(__a ) > 0:
a__ : Tuple = full_error_msg + begin_error_msg + str(__a )
raise ValueError(__a )
benchmark.run()
if __name__ == "__main__":
main()
| 37 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase : str = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
__lowerCAmelCase : Optional[int] = {
"""gpt2""": 1_024,
"""gpt2-medium""": 1_024,
"""gpt2-large""": 1_024,
"""gpt2-xl""": 1_024,
"""distilgpt2""": 1_024,
}
class A ( A__ ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ['''input_ids''', '''attention_mask''']
a_ = GPTaTokenizer
def __init__( self : Optional[Any] , __a : List[str]=None , __a : Union[str, Any]=None , __a : Optional[int]=None , __a : Any="<|endoftext|>" , __a : Dict="<|endoftext|>" , __a : Tuple="<|endoftext|>" , __a : str=False , **__a : Any , ) -> Optional[int]:
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
__UpperCAmelCase = kwargs.pop('''add_bos_token''' , lowerCamelCase__ )
__UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowerCamelCase__ ) != add_prefix_space:
__UpperCAmelCase = getattr(lowerCamelCase__ , pre_tok_state.pop('''type''' ) )
__UpperCAmelCase = add_prefix_space
__UpperCAmelCase = pre_tok_class(**lowerCamelCase__ )
__UpperCAmelCase = add_prefix_space
def snake_case__ ( self : Union[str, Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> Any:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self : Optional[Any] , __a : str , __a : Optional[str] = None ) -> Any:
__UpperCAmelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def snake_case__ ( self : Any , __a : "Conversation" ) -> int:
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) + [self.eos_token_id] )
if len(lowerCamelCase__ ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 262 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCamelCase : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCamelCase_ ( __a ) -> Any:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCamelCase_ ( __a , __a , __a ) -> Any:
return max(metric_fn(__a , __a ) for gt in ground_truths )
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()]
a__ : Tuple = []
if args.gold_data_mode == "qa":
a__ : Any = pd.read_csv(__a , sep="\t" , header=__a )
for answer_list in data[1]:
a__ : Union[str, Any] = ast.literal_eval(__a )
answers.append(__a )
else:
a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()]
a__ : List[str] = [[reference] for reference in references]
a__ : List[str] = 0
for prediction, ground_truths in zip(__a , __a ):
total += 1
em += metric_max_over_ground_truths(__a , __a , __a )
fa += metric_max_over_ground_truths(__a , __a , __a )
a__ : Dict = 100.0 * em / total
a__ : Optional[Any] = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
a__ : Optional[Any] = args.k
a__ : str = [line.strip() for line in open(__a , "r" ).readlines()]
a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()]
a__ : Tuple = 0
for hypo, reference in zip(__a , __a ):
a__ : Any = set(hypo.split("\t" )[:k] )
a__ : Union[str, Any] = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
a__ : Union[str, Any] = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
def strip_title(__a ):
if title.startswith("\"" ):
a__ : Optional[Any] = title[1:]
if title.endswith("\"" ):
a__ : Union[str, Any] = title[:-1]
return title
a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device )
a__ : Optional[int] = rag_model.rag.question_encoder(__a )
a__ : Union[str, Any] = question_enc_outputs[0]
a__ : Optional[int] = rag_model.retriever(
__a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
a__ : int = []
for docs in all_docs:
a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]]
provenance_strings.append("\t".join(__a ) )
return provenance_strings
def UpperCamelCase_ ( __a , __a , __a ) -> Dict:
with torch.no_grad():
a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__a , return_tensors="pt" , padding=__a , truncation=__a )
a__ : Any = inputs_dict.input_ids.to(args.device )
a__ : Dict = inputs_dict.attention_mask.to(args.device )
a__ : Optional[int] = rag_model.generate( # rag_model overwrites generate
__a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a )
if args.print_predictions:
for q, a in zip(__a , __a ):
logger.info("Q: {} - A: {}".format(__a , __a ) )
return answers
def UpperCamelCase_ ( ) -> List[str]:
a__ : int = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=__a , type=__a , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
a__ : int = parser.parse_args()
a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def UpperCamelCase_ ( __a ) -> Optional[int]:
a__ : Tuple = {}
if args.model_type is None:
a__ : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
a__ : Tuple = args.n_docs
if args.index_name is not None:
a__ : Any = args.index_name
if args.index_path is not None:
a__ : int = args.index_path
else:
a__ : Optional[Any] = BartForConditionalGeneration
a__ : Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , __a )
a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k
a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(__a , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(__a ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
a__ : str = RagRetriever.from_pretrained(__a , **__a )
a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a )
model.retriever.init_retrieval()
else:
a__ : Dict = model_class.from_pretrained(__a , **__a )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
a__ : List[Any] = []
for line in tqdm(__a ):
questions.append(line.strip() )
if len(__a ) == args.eval_batch_size:
a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a )
preds_file.write("\n".join(__a ) + "\n" )
preds_file.flush()
a__ : Any = []
if len(__a ) > 0:
a__ : List[str] = evaluate_batch_fn(__a , __a , __a )
preds_file.write("\n".join(__a ) )
preds_file.flush()
score_fn(__a , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCamelCase : List[Any] = get_args()
main(args)
| 37 | 0 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def snake_case ( a_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A ( nn.Module ):
"""simple docstring"""
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
super().__init__()
UpperCamelCase_ : int = module
UpperCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , )
UpperCamelCase_ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _UpperCAmelCase ( self , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ):
return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
"""simple docstring"""
__a : Union[str, Any] = '''bigscience/bloom-1b7'''
# Constant values
__a : Tuple = 2.109_659_552_692_574
__a : Optional[Any] = '''Hello my name is'''
__a : int = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
__a : Optional[int] = 10
def _UpperCAmelCase ( self ):
# Models and tokenizer
UpperCamelCase_ : List[str] = AutoTokenizer.from_pretrained(self.model_name )
class A ( A__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
super().setUp()
# Models and tokenizer
UpperCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
UpperCamelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
def _UpperCAmelCase ( self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
UpperCamelCase_ : str = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase__ , """quantization_config""" ) )
UpperCamelCase_ : Optional[Any] = config.to_dict()
UpperCamelCase_ : int = config.to_diff_dict()
UpperCamelCase_ : List[str] = config.to_json_string()
def _UpperCAmelCase ( self ):
from bitsandbytes.nn import Paramsabit
UpperCamelCase_ : List[Any] = self.model_fpaa.get_memory_footprint()
UpperCamelCase_ : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
UpperCamelCase_ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCAmelCase ( self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors="""pt""" )
UpperCamelCase_ : Tuple = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[Any] = BitsAndBytesConfig()
UpperCamelCase_ : Optional[int] = True
UpperCamelCase_ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , device_map="""auto""" )
UpperCamelCase_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" )
UpperCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCAmelCase ( self ):
with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase__ )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase_ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def _UpperCAmelCase ( self ):
with self.assertRaises(lowerCamelCase__ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
UpperCamelCase_ : int = self.tokenizer(self.input_text , return_tensors="""pt""" )
UpperCamelCase_ : Any = self.model_fpaa.to(torch.floataa )
UpperCamelCase_ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
UpperCamelCase_ : Tuple = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
UpperCamelCase_ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
UpperCamelCase_ : Dict = self.model_fpaa.float()
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCAmelCase ( cls ):
UpperCamelCase_ : Dict = "t5-small"
UpperCamelCase_ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
UpperCamelCase_ : int = AutoTokenizer.from_pretrained(cls.model_name )
UpperCamelCase_ : str = "Translate in German: Hello, my dog is cute"
def _UpperCAmelCase ( self ):
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
from transformers import TaForConditionalGeneration
UpperCamelCase_ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
UpperCamelCase_ : Optional[Any] = None
# test with `t5-small`
UpperCamelCase_ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
UpperCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
UpperCamelCase_ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
UpperCamelCase_ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
UpperCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
UpperCamelCase_ : Any = model.generate(**lowerCamelCase__ )
UpperCamelCase_ : Union[str, Any] = modules
def _UpperCAmelCase ( self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
UpperCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
UpperCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
UpperCamelCase_ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
UpperCamelCase_ : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
UpperCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
UpperCamelCase_ : Optional[int] = model.generate(**lowerCamelCase__ )
class A ( A__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
super().setUp()
# model_name
UpperCamelCase_ : Union[str, Any] = "bigscience/bloom-560m"
UpperCamelCase_ : Union[str, Any] = "t5-small"
# Different types of model
UpperCamelCase_ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
# Sequence classification model
UpperCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
# CausalLM model
UpperCamelCase_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
# Seq2seq model
UpperCamelCase_ : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="""auto""" )
def _UpperCAmelCase ( self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A ( A__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
super().setUp()
def _UpperCAmelCase ( self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
UpperCamelCase_ : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A ( A__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
super().setUp()
def _UpperCAmelCase ( self ):
UpperCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
UpperCamelCase_ : List[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
UpperCamelCase_ : List[Any] = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
class A ( A__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Any = "facebook/opt-350m"
super().setUp()
def _UpperCAmelCase ( self ):
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
UpperCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
UpperCamelCase_ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
UpperCamelCase_ : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase__ ) ):
UpperCamelCase_ : Dict = LoRALayer(module.q_proj , rank=16 )
UpperCamelCase_ : List[Any] = LoRALayer(module.k_proj , rank=16 )
UpperCamelCase_ : List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
UpperCamelCase_ : Dict = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
UpperCamelCase_ : Optional[Any] = model.forward(**lowerCamelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A ( A__ ):
"""simple docstring"""
__a : List[str] = '''gpt2-xl'''
__a : Union[str, Any] = 3.3_191_854_854_152_187
| 208 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str:
a__ : int = {}
if train_file is not None:
a__ : int = [train_file]
if eval_file is not None:
a__ : Union[str, Any] = [eval_file]
if test_file is not None:
a__ : str = [test_file]
a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a )
a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() )
a__ : str = features_name.pop(__a )
a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) )
a__ : str = {label: i for i, label in enumerate(__a )}
a__ : Tuple = tokenizer.model_input_names
a__ : List[str] = {}
if len(__a ) == 1:
for k in files.keys():
a__ : Optional[Any] = ds[k].map(
lambda __a : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , )
elif len(__a ) == 2:
for k in files.keys():
a__ : Dict = ds[k].map(
lambda __a : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
a__ : str = {k: v for k, v in ex.items() if k in input_names}
a__ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
a__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
a__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names}
a__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
a__ : Optional[Any] = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
a__ : Union[str, Any] = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
a__ : Union[str, Any] = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class A__ :
"""simple docstring"""
_lowercase = field(metadata={'help': 'Which column contains the label'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the training file'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} )
_lowercase = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_lowercase = field(
default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A__ :
"""simple docstring"""
_lowercase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowercase = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowercase = field(
default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def UpperCamelCase_ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
a__, a__, a__ : str = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a__, a__, a__, a__ : Optional[Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
a__ : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
a__ : Any = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , )
def compute_metrics(__a ) -> Dict:
a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
a__ : Dict = TFTrainer(
model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a__ : Optional[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a__ : Dict = trainer.evaluate()
a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(__a , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(__a )
return results
if __name__ == "__main__":
main()
| 37 | 0 |
import re
from filelock import FileLock
try:
import nltk
lowercase : Union[str, Any] = True
except (ImportError, ModuleNotFoundError):
lowercase : Optional[int] = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def UpperCAmelCase_ ( _UpperCAmelCase ):
re.sub("""<n>""" , """""" , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) )
| 423 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
UpperCamelCase : Union[str, Any] = None
def UpperCamelCase_ ( ) -> List[str]:
a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def UpperCamelCase_ ( __a ) -> str:
a__ : Optional[Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__ : Dict = bool(qa["answers"]["text"] )
return qid_to_has_ans
def UpperCamelCase_ ( __a ) -> List[Any]:
def remove_articles(__a ):
return ARTICLES_REGEX.sub(" " , __a )
def white_space_fix(__a ):
return " ".join(text.split() )
def remove_punc(__a ):
a__ : Union[str, Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__a ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) )
def UpperCamelCase_ ( __a ) -> Dict:
if not s:
return []
return normalize_answer(__a ).split()
def UpperCamelCase_ ( __a , __a ) -> str:
return int(normalize_answer(__a ) == normalize_answer(__a ) )
def UpperCamelCase_ ( __a , __a ) -> Dict:
a__ : int = get_tokens(__a )
a__ : Optional[Any] = get_tokens(__a )
a__ : Any = collections.Counter(__a ) & collections.Counter(__a )
a__ : Dict = sum(common.values() )
if len(__a ) == 0 or len(__a ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
a__ : Tuple = 1.0 * num_same / len(__a )
a__ : str = 1.0 * num_same / len(__a )
a__ : str = (2 * precision * recall) / (precision + recall)
return fa
def UpperCamelCase_ ( __a , __a ) -> int:
a__ : List[str] = {}
a__ : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__ : List[Any] = qa["id"]
a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
a__ : Tuple = [""]
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
a__ : Tuple = preds[qid]
# Take max over all gold answers
a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers )
a__ : str = max(compute_fa(__a , __a ) for a in gold_answers )
return exact_scores, fa_scores
def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]:
a__ : Optional[Any] = {}
for qid, s in scores.items():
a__ : Dict = na_probs[qid] > na_prob_thresh
if pred_na:
a__ : Dict = float(not qid_to_has_ans[qid] )
else:
a__ : Optional[Any] = s
return new_scores
def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple:
if not qid_list:
a__ : Union[str, Any] = len(__a )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
a__ : int = len(__a )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
for k in new_eval:
a__ : Optional[Any] = new_eval[k]
def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]:
plt.step(__a , __a , color="b" , alpha=0.2 , where="post" )
plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__a )
plt.savefig(__a )
plt.clf()
def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict:
a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] )
a__ : Any = 0.0
a__ : Optional[int] = 1.0
a__ : Optional[int] = 0.0
a__ : Any = [1.0]
a__ : Tuple = [0.0]
a__ : List[str] = 0.0
for i, qid in enumerate(__a ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
a__ : Any = true_pos / float(i + 1 )
a__ : int = true_pos / float(__a )
if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__a )
recalls.append(__a )
if out_image:
plot_pr_curve(__a , __a , __a , __a )
return {"ap": 100.0 * avg_prec}
def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str:
if out_image_dir and not os.path.exists(__a ):
os.makedirs(__a )
a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
a__ : Optional[int] = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
a__ : Optional[Any] = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()}
a__ : Optional[Any] = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(__a , __a , "pr_exact" )
merge_eval(__a , __a , "pr_f1" )
merge_eval(__a , __a , "pr_oracle" )
def UpperCamelCase_ ( __a , __a , __a , __a ) -> str:
if not qid_list:
return
a__ : Optional[Any] = [na_probs[k] for k in qid_list]
a__ : str = np.ones_like(__a ) / float(len(__a ) )
plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]:
a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
a__ : Optional[Any] = num_no_ans
a__ : Dict = cur_score
a__ : Any = 0.0
a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] )
for i, qid in enumerate(__a ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
a__ : Optional[int] = scores[qid]
else:
if preds[qid]:
a__ : str = -1
else:
a__ : Union[str, Any] = 0
cur_score += diff
if cur_score > best_score:
a__ : Any = cur_score
a__ : Dict = na_probs[qid]
return 100.0 * best_score / len(__a ), best_thresh
def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any:
a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a )
a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a )
a__ : Any = best_exact
a__ : Any = exact_thresh
a__ : List[Any] = best_fa
a__ : Optional[int] = fa_thresh
def UpperCamelCase_ ( ) -> Tuple:
with open(OPTS.data_file ) as f:
a__ : List[Any] = json.load(__a )
a__ : Any = dataset_json["data"]
with open(OPTS.pred_file ) as f:
a__ : int = json.load(__a )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
a__ : List[str] = json.load(__a )
else:
a__ : Optional[int] = {k: 0.0 for k in preds}
a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False
a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v]
a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v]
a__, a__ : str = get_raw_scores(__a , __a )
a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh )
a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh )
a__ : Tuple = make_eval_dict(__a , __a )
if has_ans_qids:
a__ : str = make_eval_dict(__a , __a , qid_list=__a )
merge_eval(__a , __a , "HasAns" )
if no_ans_qids:
a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a )
merge_eval(__a , __a , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(__a , __a , __a , __a , __a , __a )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir )
histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(__a , __a )
else:
print(json.dumps(__a , indent=2 ) )
if __name__ == "__main__":
UpperCamelCase : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 37 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ):
__lowerCAmelCase = 9, 14 # noqa: F841
__lowerCAmelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__lowerCAmelCase = defaultdict(__a )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__lowerCAmelCase = mst(__a )
__lowerCAmelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__lowerCAmelCase = tuple(answer[:2] )
__lowerCAmelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
| 53 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = CLIPTokenizer
_lowercase = CLIPTokenizerFast
_lowercase = True
_lowercase = {}
_lowercase = False
def _UpperCamelCase( self : List[Any] ):
super().setUp()
# fmt: off
a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a__ : Optional[Any] = {"unk_token": "<unk>"}
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ):
a__ : int = "lower newer"
a__ : Optional[int] = "lower newer"
return input_text, output_text
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : int = "lower newer"
a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : int = tokens + [tokenizer.unk_token]
a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@require_ftfy
def _UpperCamelCase( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y"
a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of space type
a__ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of line break type
a__ : Union[str, Any] = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}'''
a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
a__ : Optional[Any] = f''' {text}'''
a__ : str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
def _UpperCamelCase( self : int ):
# Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(lowerCamelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def _UpperCamelCase( self : int ):
super().test_tokenization_python_rust_equals()
def _UpperCamelCase( self : str ):
# CLIP always lower cases letters
pass
| 37 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
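# Usage sketch (illustrative, not part of the module): once the lazy module is installed,
# the tokenizer classes resolve on first attribute access. The checkpoint name below is an
# example, and the sentencepiece extra must be available for the slow tokenizer.
#
#   from transformers import NllbTokenizer
#
#   tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
#   tok("Hello world", return_tensors="pt")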
| 130 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """
UpperCamelCase : List[Any] = """=======
>>>>>>>
"""
UpperCamelCase : Optional[Any] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
UpperCamelCase : Any = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def UpperCamelCase_ ( __a ) -> Optional[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A__ ( A__ ):
"""simple docstring"""
@staticmethod
def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ):
a__ : List[str] = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ):
a__ : str = get_logger("datasets-cli/converting" )
a__ : Optional[Any] = tfds_path
a__ : Optional[int] = datasets_directory
def _UpperCamelCase( self : int ):
if os.path.isdir(self._tfds_path ):
a__ : List[str] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
a__ : Any = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
a__ : Dict = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
a__ : Tuple = []
a__ : str = []
a__ : List[Any] = {}
if os.path.isdir(self._tfds_path ):
a__ : List[str] = os.listdir(lowerCamelCase__ )
else:
a__ : Union[str, Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(lowerCamelCase__ , encoding="utf-8" ) as f:
a__ : List[Any] = f.readlines()
a__ : Union[str, Any] = []
a__ : Union[str, Any] = False
a__ : Union[str, Any] = False
a__ : Dict = []
for line in lines:
a__ : Optional[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a__ : List[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
a__ : List[str] = ""
continue
elif "from absl import logging" in out_line:
a__ : Dict = "from datasets import logging\n"
elif "getLogger" in out_line:
a__ : List[Any] = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
a__ : List[str] = True
a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" )
out_lines.append(lowerCamelCase__ )
out_lines.append(lowerCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
a__ : Optional[Any] = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a__ : Optional[int] = True
out_lines.append(lowerCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a__ : Dict = f_name.replace(".py" , "" )
a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase__ )
if needs_manual_update:
with_manual_update.append(lowerCamelCase__ )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.writelines(lowerCamelCase__ )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
a__ : Any = os.path.basename(lowerCamelCase__ )
a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(lowerCamelCase__ , lowerCamelCase__ )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
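# Example invocation (a sketch; the paths are placeholders, while the flags are exactly the
# arguments registered on the "convert" subparser above):
#
#   datasets-cli convert \
#       --tfds_path ~/tensorflow_datasets/my_dataset/my_dataset.py \
#       --datasets_directory ./my_converted_datasets/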
| 37 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
def __init__( self : Optional[Any] , snake_case__ : Dict , snake_case__ : str=3 , snake_case__ : Dict=32 , snake_case__ : Dict=3 , snake_case__ : str=10 , snake_case__ : Tuple=[10, 20, 30, 40] , snake_case__ : Tuple=[1, 1, 2, 1] , snake_case__ : str=True , snake_case__ : List[str]=True , snake_case__ : Tuple="relu" , snake_case__ : List[str]=3 , snake_case__ : int=None , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = embeddings_size
lowerCAmelCase__ = hidden_sizes
lowerCAmelCase__ = depths
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = scope
lowerCAmelCase__ = len(lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Any ):
lowerCAmelCase__ = TFResNetModel(config=lowerCamelCase__ )
lowerCAmelCase__ = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFResNetForImageClassification(lowerCamelCase__ )
lowerCAmelCase__ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( A__ , A__ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase_ : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : Dict = False
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = TFResNetModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Any ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
def check_hidden_states_output(snake_case__ : Any , snake_case__ : Any , snake_case__ : str ):
lowerCAmelCase__ = model_class(lowerCamelCase__ )
lowerCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase__ = layer_type
lowerCAmelCase__ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFResNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase__ = model(**lowerCamelCase__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
lowerCAmelCase__ = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase__ , atol=1E-4 ) )
| 644 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class A__ ( A__ ):
"""simple docstring"""
_lowercase = ''
_lowercase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowercase = None # compression type in fsspec. ex: "gzip"
    _lowercase = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ):
super().__init__(self , **lowerCamelCase__ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
a__ : str = fsspec.open(
lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] )
a__ : int = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
a__ : List[Any] = None
@classmethod
def _UpperCamelCase( cls : int , lowerCamelCase__ : int ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" )
def _UpperCamelCase( self : Dict ):
if self.dir_cache is None:
a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
a__ : int = {f["name"]: f}
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ):
return self.file.open().read()
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ):
a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'bz2'
_lowercase = 'bz2'
_lowercase = '.bz2'
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'gzip'
_lowercase = 'gzip'
_lowercase = '.gz'
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'lz4'
_lowercase = 'lz4'
_lowercase = '.lz4'
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'xz'
_lowercase = 'xz'
_lowercase = '.xz'
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'zstd'
_lowercase = 'zstd'
_lowercase = '.zst'
def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ):
super().__init__(
fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
a__ : Any = self.file.__enter__
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : str ):
a__ : List[Any] = file_
def __enter__( self : str ):
self._file.__enter__()
return self
def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ):
self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ )
def __iter__( self : List[str] ):
return iter(self._file )
def _UpperCamelCase( self : Any ):
return next(self._file )
def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ):
return getattr(self._file , lowerCamelCase__ )
def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ):
return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) )
a__ : Any = fixed_enter
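# Usage sketch (illustrative): fsspec resolves chained URLs right to left, so a path such as
# "gzip://file.txt::file:///tmp/file.txt.gz" opens the local archive and streams the
# decompressed bytes through the gzip filesystem class above. These classes are normally
# registered with fsspec under their `protocol` names when the `datasets` package is
# imported; the path below is a placeholder.
#
#   import fsspec
#
#   with fsspec.open("gzip://file.txt::file:///tmp/file.txt.gz", mode="rb") as f:
#       payload = f.read()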
| 37 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
a__ =['''note_seq''']
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ['''note_seq'''] )
@classmethod
def __lowerCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ['''note_seq'''] )
@classmethod
def __lowerCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ['''note_seq'''] )
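# Behavior sketch (illustrative): when the `note_seq` backend is missing,
# `requires_backends` raises an ImportError with an installation hint, so even
# instantiating the dummy class defined above fails fast:
#
#   try:
#       _UpperCAmelCase()
#   except ImportError as err:
#       print(err)  # the message explains how to install note_seq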
| 506 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
a__ : Union[str, Any] = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__a ):
os.makedirs(__a )
a__ : Any = model.state_dict()
def to_tf_var_name(__a ):
for patt, repl in iter(__a ):
a__ : Tuple = name.replace(__a , __a )
return f'''bert/{name}'''
def create_tf_var(__a , __a , __a ):
a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype )
a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__a )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
a__ : int = to_tf_var_name(__a )
a__ : Union[str, Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
a__ : int = torch_tensor.T
a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a )
tf.keras.backend.set_value(__a , __a )
a__ : int = session.run(__a )
print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' )
a__ : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) )
def UpperCamelCase_ ( __a=None ) -> int:
a__ : Dict = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" )
a__ : Optional[Any] = parser.parse_args(__a )
a__ : Tuple = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
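# Example invocation (a sketch; the script filename and paths are placeholders, while the
# flags are the ones defined in the argument parser above):
#
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint/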
| 37 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class A ( A__ ):
def __init__( self ) -> Any:
_a = []
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[Any]:
self.events.append("on_init_end" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Dict:
self.events.append("on_train_begin" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> str:
self.events.append("on_train_end" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Any:
self.events.append("on_epoch_begin" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> str:
self.events.append("on_epoch_end" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> List[str]:
self.events.append("on_step_begin" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[Any]:
self.events.append("on_step_end" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> List[str]:
self.events.append("on_evaluate" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
self.events.append("on_predict" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Any:
self.events.append("on_save" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Dict:
self.events.append("on_log" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
self.events.append("on_prediction_step" )
@require_torch
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = tempfile.mkdtemp()
def __lowerCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def __lowerCAmelCase ( self , snake_case_=0 , snake_case_=0 , snake_case_=6_4 , snake_case_=6_4 , snake_case_=None , snake_case_=False , **snake_case_ ) -> Dict:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
_a = RegressionDataset(length=lowerCamelCase__ )
_a = RegressionDataset(length=lowerCamelCase__ )
_a = RegressionModelConfig(a=lowerCamelCase__ , b=lowerCamelCase__ )
_a = RegressionPreTrainedModel(lowerCamelCase__ )
_a = TrainingArguments(self.output_dir , disable_tqdm=lowerCamelCase__ , report_to=[] , **lowerCamelCase__ )
return Trainer(
lowerCamelCase__ , lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , callbacks=lowerCamelCase__ , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[str]:
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
# Order doesn't matter
_a = sorted(lowerCamelCase__ , key=lambda snake_case_ : cb.__name__ if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cb.__class__.__name__ )
_a = sorted(lowerCamelCase__ , key=lambda snake_case_ : cb.__name__ if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cb.__class__.__name__ )
for cba, cba in zip(lowerCamelCase__ , lowerCamelCase__ ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(lowerCamelCase__ , cba.__class__ )
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(cba.__class__ , lowerCamelCase__ )
else:
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __lowerCAmelCase ( self , snake_case_ ) -> Dict:
_a = ["on_init_end", "on_train_begin"]
_a = 0
_a = len(trainer.get_eval_dataloader() )
_a = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(lowerCamelCase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __lowerCAmelCase ( self ) -> Any:
_a = self.get_trainer()
_a = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
# Callbacks passed at init are added to the default callbacks
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowerCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
_a = self.get_trainer(disable_tqdm=lowerCamelCase__ )
_a = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
def __lowerCAmelCase ( self ) -> str:
_a = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_a = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowerCamelCase__ )
expected_callbacks.remove(lowerCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
_a = self.get_trainer()
_a = trainer.pop_callback(lowerCamelCase__ )
self.assertEqual(cb.__class__ , lowerCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
trainer.add_callback(lowerCamelCase__ )
expected_callbacks.insert(0 , lowerCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
# We can also add, pop, or remove by instance
_a = self.get_trainer()
_a = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowerCamelCase__ )
expected_callbacks.remove(lowerCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
_a = self.get_trainer()
_a = trainer.callback_handler.callbacks[0]
_a = trainer.pop_callback(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
trainer.add_callback(lowerCamelCase__ )
expected_callbacks.insert(0 , lowerCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ )
def __lowerCAmelCase ( self ) -> Tuple:
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=lowerCamelCase__ )
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) )
# Independent log/save/eval
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) )
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) )
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) )
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) )
# A bit of everything
_a = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
_a = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowerCamelCase__ ) in warn_mock.call_args[0][0]
| 131 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ):
a__ : str = parent
a__ : Any = batch_size
a__ : Dict = patch_size
a__ : List[Any] = max_length
a__ : str = num_mel_bins
a__ : Optional[Any] = is_training
a__ : Optional[int] = use_labels
a__ : List[Any] = hidden_size
a__ : str = num_hidden_layers
a__ : Any = num_attention_heads
a__ : Union[str, Any] = intermediate_size
a__ : List[str] = hidden_act
a__ : str = hidden_dropout_prob
a__ : Tuple = attention_probs_dropout_prob
a__ : List[Any] = type_sequence_label_size
a__ : Any = initializer_range
a__ : str = scope
a__ : List[str] = frequency_stride
a__ : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
a__ : Tuple = frequency_out_dimension * time_out_dimension
a__ : List[str] = num_patches + 2
def _UpperCamelCase( self : List[str] ):
a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
a__ : List[Any] = None
if self.use_labels:
a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : List[str] = self.get_config()
return config, input_values, labels
def _UpperCamelCase( self : Optional[int] ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] ):
a__ : List[Any] = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase( self : str ):
a__ : Dict = self.prepare_config_and_inputs()
        a__, a__, a__ : Optional[int] = config_and_inputs
a__ : List[Any] = {"input_values": input_values}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_lowercase = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _UpperCamelCase( self : str ):
a__ : str = ASTModelTester(self )
a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _UpperCamelCase( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _UpperCamelCase( self : List[str] ):
pass
def _UpperCamelCase( self : Optional[int] ):
a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Any = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def _UpperCamelCase( self : Tuple ):
a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(lowerCamelCase__ )
a__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Optional[Any] = ["input_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCamelCase_ ( ) -> Any:
a__ : Optional[int] = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
a__, a__ : List[str] = torchaudio.load(__a )
return audio, sampling_rate
@require_torch
@require_torchaudio
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : List[str] ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _UpperCamelCase( self : Optional[int] ):
a__ : int = self.default_feature_extractor
a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(lowerCamelCase__ )
a__ : Any = self.default_feature_extractor
a__, a__ : Dict = prepare_audio()
a__ : str = audio.squeeze().numpy()
a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Any = model(**lowerCamelCase__ )
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
| 37 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a__ :
@staticmethod
def a_ ( *UpperCamelCase_ : Dict , **UpperCamelCase_ : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class a__ ( unittest.TestCase ):
@require_torch
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : int = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
__UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__UpperCAmelCase : Optional[int] = image_classifier(lowerCamelCase__ , candidate_labels=["a", "b", "c"])
        # The floating-point scores are so close that they fall within floating-point error, so the order is not
        # guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(lowerCamelCase__) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
__UpperCAmelCase : Tuple = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCamelCase__) , [
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
] , )
@require_tf
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[str] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf")
__UpperCAmelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__UpperCAmelCase : str = image_classifier(lowerCamelCase__ , candidate_labels=["a", "b", "c"])
self.assertEqual(
nested_simplify(lowerCamelCase__) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
__UpperCAmelCase : str = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCamelCase__) , [
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
{"score": 0.333, "label": ANY(lowerCamelCase__)},
],
] , )
@slow
@require_torch
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : List[Any] = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
__UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__UpperCAmelCase : Any = image_classifier(lowerCamelCase__ , candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(lowerCamelCase__) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
__UpperCAmelCase : int = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCamelCase__) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Dict = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf")
# This is an image of 2 cats with remotes and no planes
__UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__UpperCAmelCase : int = image_classifier(lowerCamelCase__ , candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(lowerCamelCase__) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
__UpperCAmelCase : List[Any] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCamelCase__) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 77 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = XGLMTokenizer
_lowercase = XGLMTokenizerFast
_lowercase = True
_lowercase = True
def _UpperCamelCase( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase( self : List[Any] ):
a__ : int = "<pad>"
a__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(lowerCamelCase__ ) , 1_008 )
def _UpperCamelCase( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def _UpperCamelCase( self : Optional[int] ):
a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
a__ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _UpperCamelCase( self : Dict ):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def _UpperCamelCase( self : Union[str, Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase__ , f.name )
a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ )
a__ : List[str] = pickle.dumps(lowerCamelCase__ )
pickle.loads(lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
if not self.test_rust_tokenizer:
return
a__ : Any = self.get_tokenizer()
a__ : Optional[Any] = self.get_rust_tokenizer()
a__ : Tuple = "I was born in 92000, and this is falsé."
a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ )
a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : Tuple = tokenizer.encode(lowerCamelCase__ )
a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = "Hello World!"
a__ : List[str] = [2, 31_227, 4_447, 35]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _UpperCamelCase( self : Union[str, Any] ):
a__ : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _UpperCamelCase( self : List[Any] ):
# fmt: off
a__ : Optional[int] = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
| 37 | 0 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowerCamelCase_ = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def __lowerCamelCase ( a_ : List[str] , a_ : Any ) -> str:
warnings.warn(__a , __a )
requires_backends(__a , '''sklearn''' )
return (preds == labels).mean()
def __lowerCamelCase ( a_ : str , a_ : Any ) -> List[Any]:
warnings.warn(__a , __a )
requires_backends(__a , '''sklearn''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = simple_accuracy(__a , __a )
__SCREAMING_SNAKE_CASE :Any = fa_score(y_true=__a , y_pred=__a )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __lowerCamelCase ( a_ : int , a_ : Union[str, Any] ) -> str:
warnings.warn(__a , __a )
requires_backends(__a , '''sklearn''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = pearsonr(__a , __a )[0]
__SCREAMING_SNAKE_CASE :int = spearmanr(__a , __a )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __lowerCamelCase ( a_ : Optional[int] , a_ : int , a_ : Dict ) -> Tuple:
warnings.warn(__a , __a )
requires_backends(__a , '''sklearn''' )
assert len(__a ) == len(__a ), f'''Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(__a , __a )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "mrpc":
return acc_and_fa(__a , __a )
elif task_name == "sts-b":
return pearson_and_spearman(__a , __a )
elif task_name == "qqp":
return acc_and_fa(__a , __a )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__a , __a )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__a , __a )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "rte":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "hans":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
def __lowerCamelCase ( a_ : Optional[Any] , a_ : Tuple , a_ : Optional[Any] ) -> Tuple:
warnings.warn(__a , __a )
requires_backends(__a , '''sklearn''' )
if len(__a ) != len(__a ):
raise ValueError(f'''Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}''' )
if task_name == "xnli":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a ) | 498 |
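# Illustrative usage of the GLUE metric helpers above (added for clarity; requires
# scikit-learn). On toy predictions [1, 0, 1, 1] vs. labels [1, 0, 0, 1]:
#   glue_compute_metrics("mrpc", preds, labels)
#   -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}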
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCamelCase_ ( ) -> int:
a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" )
return image
def UpperCamelCase_ ( __a ) -> Optional[Any]:
a__ : Any = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
a__ : Union[str, Any] = dct.pop(__a )
a__ : List[str] = val
def UpperCamelCase_ ( __a , __a ) -> Optional[Any]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) )
a__ : int = qkv_bias
def UpperCamelCase_ ( __a ) -> Dict:
a__ : Tuple = 364 if "coco" in model_name else 224
a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict()
elif "vicuna-13b" in model_name:
a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict()
else:
raise ValueError("Model name not supported" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a )
return config, image_size
@torch.no_grad()
def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int:
a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
a__, a__ : List[str] = get_blipa_config(__a )
a__ : Any = InstructBlipForConditionalGeneration(__a ).eval()
a__ : Dict = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
a__, a__ : Dict = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu"
a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu"
a__, a__, a__ : Tuple = load_model_and_preprocess(
name=__a , model_type=__a , is_eval=__a , device=__a )
original_model.eval()
print("Done!" )
# update state dict keys
a__ : Dict = original_model.state_dict()
a__ : Optional[int] = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a__ : Optional[int] = state_dict.pop(__a )
if key.startswith("Qformer.bert" ):
a__ : List[Any] = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
a__ : Any = key.replace("self" , "attention" )
if "llm_proj" in key:
a__ : Dict = key.replace("llm_proj" , "language_projection" )
if "t5_proj" in key:
a__ : int = key.replace("t5_proj" , "language_projection" )
if key.startswith("llm_model" ):
a__ : List[str] = key.replace("llm_model" , "language_model" )
if key.startswith("t5" ):
a__ : str = key.replace("t5" , "language" )
a__ : Dict = val
# read in qv biases
read_in_q_v_bias(__a , __a )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__a , strict=__a )
a__ : Union[str, Any] = load_demo_image()
a__ : int = "What is unusual about this image?"
# create processor
a__ : Any = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a )
a__ : Tuple = InstructBlipProcessor(
image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , )
a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a )
# make sure processor creates exact same pixel values
a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a )
a__ : Optional[Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a )
original_model.to(__a )
hf_model.to(__a )
with torch.no_grad():
if "vicuna" in model_name:
a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
a__ : List[str] = hf_model(**__a ).logits
else:
a__ : List[Any] = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a )
a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
a__ : Any = hf_model(**__a , labels=__a ).logits
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a )
print("Looks ok!" )
print("Generating with original model..." )
a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
a__ : int = hf_model.generate(
**__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
a__ : int = 2
print("Original generation:" , __a )
a__ : str = processor.batch_decode(__a , skip_special_tokens=__a )
a__ : str = [text.strip() for text in output_text]
print("HF generation:" , __a )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__a )
hf_model.save_pretrained(__a )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
UpperCamelCase : Optional[int] = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase : Dict = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
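# Illustrative sketch (added, not part of the original script): the LAVIS checkpoints
# store separate q and v biases and no k bias, so read_in_q_v_bias above rebuilds the
# fused qkv bias as [q_bias, zeros_like(q_bias), v_bias]. Values below are made up.
def _demo_qkv_bias_layout(hidden_size: int = 4) -> torch.Tensor:
    q_bias = torch.ones(hidden_size)  # hypothetical bias; real values come from the checkpoint
    v_bias = torch.full((hidden_size,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias), v_bias))
    assert qkv_bias.shape == (3 * hidden_size,)
    return qkv_bias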
| 37 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a :
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.0_2 , snake_case_=3 , snake_case_=None , snake_case_=2 , ):
'''simple docstring'''
__UpperCAmelCase: str = parent
__UpperCAmelCase: Any = batch_size
__UpperCAmelCase: Optional[int] = image_size
__UpperCAmelCase: Tuple = patch_size
__UpperCAmelCase: Union[str, Any] = num_channels
__UpperCAmelCase: str = is_training
__UpperCAmelCase: Dict = use_labels
__UpperCAmelCase: List[str] = hidden_size
__UpperCAmelCase: Union[str, Any] = num_hidden_layers
__UpperCAmelCase: Tuple = num_attention_heads
__UpperCAmelCase: Dict = intermediate_size
__UpperCAmelCase: List[Any] = hidden_act
__UpperCAmelCase: int = hidden_dropout_prob
__UpperCAmelCase: Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase: Dict = type_sequence_label_size
__UpperCAmelCase: Optional[Any] = initializer_range
__UpperCAmelCase: Tuple = scope
__UpperCAmelCase: List[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__UpperCAmelCase: Optional[Any] = (image_size // patch_size) ** 2
__UpperCAmelCase: List[str] = num_patches + 2
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase: List[str] = None
if self.use_labels:
__UpperCAmelCase: int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase: str = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[str] = TFDeiTModel(config=lowerCamelCase__ )
__UpperCAmelCase: Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: str = TFDeiTForMaskedImageModeling(config=lowerCamelCase__ )
__UpperCAmelCase: Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase: Dict = 1
__UpperCAmelCase: Union[str, Any] = TFDeiTForMaskedImageModeling(lowerCamelCase__ )
__UpperCAmelCase: List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase: int = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = self.type_sequence_label_size
__UpperCAmelCase: Any = TFDeiTForImageClassification(lowerCamelCase__ )
__UpperCAmelCase: List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase: List[str] = 1
__UpperCAmelCase: str = TFDeiTForImageClassification(lowerCamelCase__ )
__UpperCAmelCase: Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase: Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase: Union[str, Any] = config_and_inputs
__UpperCAmelCase: List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class a ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowerCAmelCase = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = TFDeiTModelTester(self )
__UpperCAmelCase: Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase: List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__UpperCAmelCase: Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Dense ) )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase: str = model_class(lowerCamelCase__ )
__UpperCAmelCase: int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase: List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase: Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_=False ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase: Optional[Any] = TFDeiTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCamelCase__ ( ) -> Tuple:
__UpperCAmelCase: Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: int = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
__UpperCAmelCase: Any = self.default_image_processor
__UpperCAmelCase: Any = prepare_img()
__UpperCAmelCase: Tuple = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" )
# forward pass
__UpperCAmelCase: Dict = model(**lowerCamelCase__ )
# verify the logits
__UpperCAmelCase: Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__UpperCAmelCase: Optional[Any] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) ) | 523 |
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
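# Sanity check (illustrative addition): the update c[j] += c[j - 1] applies Pascal's
# rule row by row, so the DP result should agree with math.comb from the stdlib.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252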
| 37 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A ( A__ , A__ , unittest.TestCase ):
a_ = AutoencoderKL
a_ = '''sample'''
a_ = 1e-2
@property
def snake_case__ ( self : str ) -> List[str]:
__UpperCAmelCase = 4
__UpperCAmelCase = 3
__UpperCAmelCase = (3_2, 3_2)
__UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase__ )
return {"sample": image}
@property
def snake_case__ ( self : List[Any] ) -> Tuple:
return (3, 3_2, 3_2)
@property
def snake_case__ ( self : Dict ) -> Dict:
return (3, 3_2, 3_2)
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
__UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self : Tuple ) -> int:
pass
def snake_case__ ( self : Any ) -> Optional[Any]:
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def snake_case__ ( self : List[str] ) -> Tuple:
# enable deterministic behavior for gradient checkpointing
__UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
__UpperCAmelCase = self.model_class(**lowerCamelCase__ )
model.to(lowerCamelCase__ )
assert not model.is_gradient_checkpointing and model.training
__UpperCAmelCase = model(**lowerCamelCase__ ).sample
        # run the backward pass on the model; for simplicity we skip a real loss and
        # instead backprop on the mean difference between the output and random targets
model.zero_grad()
__UpperCAmelCase = torch.randn_like(lowerCamelCase__ )
__UpperCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__UpperCAmelCase = self.model_class(**lowerCamelCase__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__UpperCAmelCase = model_a(**lowerCamelCase__ ).sample
        # run the backward pass on the model; for simplicity we skip a real loss and
        # instead backprop on the mean difference between the output and random targets
model_a.zero_grad()
__UpperCAmelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__UpperCAmelCase = dict(model.named_parameters() )
__UpperCAmelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def snake_case__ ( self : Dict ) -> str:
__UpperCAmelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(lowerCamelCase__ )
__UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
__UpperCAmelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
__UpperCAmelCase = model.to(lowerCamelCase__ )
model.eval()
if torch_device == "mps":
__UpperCAmelCase = torch.manual_seed(0 )
else:
__UpperCAmelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__UpperCAmelCase = image.to(lowerCamelCase__ )
with torch.no_grad():
__UpperCAmelCase = model(lowerCamelCase__ , sample_posterior=lowerCamelCase__ , generator=lowerCamelCase__ ).sample
__UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__UpperCAmelCase = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
__UpperCAmelCase = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__UpperCAmelCase = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase__ , lowerCamelCase__ , rtol=1e-2 ) )
@slow
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : List[Any] ) -> Optional[Any]:
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape] )}.npy"""
def snake_case__ ( self : Tuple ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Union[str, Any] , __a : Dict=0 , __a : Optional[int]=(4, 3, 5_1_2, 5_1_2) , __a : Tuple=False ) -> Union[str, Any]:
__UpperCAmelCase = torch.floataa if fpaa else torch.floataa
__UpperCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase__ , lowerCamelCase__ ) ) ).to(lowerCamelCase__ ).to(lowerCamelCase__ )
return image
def snake_case__ ( self : Optional[int] , __a : List[Any]="CompVis/stable-diffusion-v1-4" , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = "fp16" if fpaa else None
__UpperCAmelCase = torch.floataa if fpaa else torch.floataa
__UpperCAmelCase = AutoencoderKL.from_pretrained(
lowerCamelCase__ , subfolder='''vae''' , torch_dtype=lowerCamelCase__ , revision=lowerCamelCase__ , )
model.to(lowerCamelCase__ ).eval()
return model
def snake_case__ ( self : Any , __a : int=0 ) -> Union[str, Any]:
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase__ )
return torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[4_7, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def snake_case__ ( self : Optional[int] , __a : Any , __a : List[str] , __a : str ) -> Tuple:
__UpperCAmelCase = self.get_sd_vae_model()
__UpperCAmelCase = self.get_sd_image(lowerCamelCase__ )
__UpperCAmelCase = self.get_generator(lowerCamelCase__ )
with torch.no_grad():
__UpperCAmelCase = model(lowerCamelCase__ , generator=lowerCamelCase__ , sample_posterior=lowerCamelCase__ ).sample
assert sample.shape == image.shape
__UpperCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__UpperCAmelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[4_7, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : Dict , __a : List[str] , __a : List[Any] ) -> Union[str, Any]:
__UpperCAmelCase = self.get_sd_vae_model(fpaa=lowerCamelCase__ )
__UpperCAmelCase = self.get_sd_image(lowerCamelCase__ , fpaa=lowerCamelCase__ )
__UpperCAmelCase = self.get_generator(lowerCamelCase__ )
with torch.no_grad():
__UpperCAmelCase = model(lowerCamelCase__ , generator=lowerCamelCase__ , sample_posterior=lowerCamelCase__ ).sample
assert sample.shape == image.shape
__UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__UpperCAmelCase = torch.tensor(lowerCamelCase__ )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[4_7, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : List[Any] , __a : List[Any] ) -> Union[str, Any]:
__UpperCAmelCase = self.get_sd_vae_model()
__UpperCAmelCase = self.get_sd_image(lowerCamelCase__ )
with torch.no_grad():
__UpperCAmelCase = model(lowerCamelCase__ ).sample
assert sample.shape == image.shape
__UpperCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__UpperCAmelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[1_3, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[3_7, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : Tuple , __a : Any , __a : List[str] ) -> Optional[Any]:
__UpperCAmelCase = self.get_sd_vae_model()
__UpperCAmelCase = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 6_4, 6_4) )
with torch.no_grad():
__UpperCAmelCase = model.decode(lowerCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
__UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
__UpperCAmelCase = torch.tensor(lowerCamelCase__ )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[2_7, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[1_6, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : Union[str, Any] , __a : Union[str, Any] , __a : Tuple ) -> Optional[Any]:
__UpperCAmelCase = self.get_sd_vae_model(fpaa=lowerCamelCase__ )
__UpperCAmelCase = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=lowerCamelCase__ )
with torch.no_grad():
__UpperCAmelCase = model.decode(lowerCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
__UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__UpperCAmelCase = torch.tensor(lowerCamelCase__ )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=5e-3 )
@parameterized.expand([(1_3,), (1_6,), (2_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def snake_case__ ( self : str , __a : List[str] ) -> List[str]:
__UpperCAmelCase = self.get_sd_vae_model(fpaa=lowerCamelCase__ )
__UpperCAmelCase = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=lowerCamelCase__ )
with torch.no_grad():
__UpperCAmelCase = model.decode(lowerCamelCase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__UpperCAmelCase = model.decode(lowerCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1e-1 )
@parameterized.expand([(1_3,), (1_6,), (3_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def snake_case__ ( self : List[str] , __a : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.get_sd_vae_model()
__UpperCAmelCase = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 6_4, 6_4) )
with torch.no_grad():
__UpperCAmelCase = model.decode(lowerCamelCase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__UpperCAmelCase = model.decode(lowerCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[4_7, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def snake_case__ ( self : Optional[int] , __a : Dict , __a : Optional[int] ) -> List[str]:
__UpperCAmelCase = self.get_sd_vae_model()
__UpperCAmelCase = self.get_sd_image(lowerCamelCase__ )
__UpperCAmelCase = self.get_generator(lowerCamelCase__ )
with torch.no_grad():
__UpperCAmelCase = model.encode(lowerCamelCase__ ).latent_dist
__UpperCAmelCase = dist.sample(generator=lowerCamelCase__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__UpperCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
__UpperCAmelCase = torch.tensor(lowerCamelCase__ )
__UpperCAmelCase = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=lowerCamelCase__ )
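# Illustrative usage sketch (added for clarity; the dummy checkpoint name is taken
# from the tests above): AutoencoderKL.encode returns a Gaussian posterior over a
# spatially downsampled latent, and decode maps a latent sample back to image space.
def _demo_vae_roundtrip():
    model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
    image = torch.randn(1, model.config.in_channels, 32, 32)
    with torch.no_grad():
        posterior = model.encode(image).latent_dist
        latents = posterior.sample(generator=torch.manual_seed(0))
        reconstruction = model.decode(latents).sample
    assert reconstruction.shape == image.shape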
| 262 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
class A__ ( A__ ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = LEDTokenizer
_lowercase = ['input_ids', 'attention_mask']
def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) )
a__ : Optional[Any] = add_prefix_space
a__ : List[str] = pre_tok_class(**lowerCamelCase__ )
a__ : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a__ : Any = "post_processor"
a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
a__ : Any = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a__ : Optional[Any] = tuple(state["sep"] )
if "cls" in state:
a__ : Optional[Any] = tuple(state["cls"] )
a__ : Optional[int] = False
if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : Dict = add_prefix_space
a__ : int = True
if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets:
a__ : List[Any] = trim_offsets
a__ : List[str] = True
if changes_to_apply:
a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) )
a__ : int = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCamelCase( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ):
a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
a__ : Union[str, Any] = value
def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ):
a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ):
a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ):
a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
a__ : List[str] = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ):
a__ : str = super()._pad(
encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
a__ : Optional[int] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a__ : Tuple = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ )
if needs_to_be_padded:
a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a__ : List[Any] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
a__ : Any = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
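# Illustrative sketch (added, not part of the class above): how _pad extends a
# global_attention_mask alongside the padded input_ids. `-1` marks padded positions,
# since `0` already means "local attention" for LED.
def _demo_pad_global_attention(global_attention_mask: list, target_length: int, padding_side: str = "right") -> list:
    difference = target_length - len(global_attention_mask)
    if padding_side == "right":
        return global_attention_mask + [-1] * difference
    return [-1] * difference + global_attention_mask

assert _demo_pad_global_attention([1, 0, 0], 5) == [1, 0, 0, -1, -1]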
| 37 | 0 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of two entries of the sorted list `nums` that sum to `target` (two-pointer scan)."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 208 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase : List[str] = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class A__ ( A__ ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ['input_ids', 'attention_mask']
_lowercase = RobertaTokenizer
def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) )
a__ : int = add_prefix_space
a__ : Tuple = pre_tok_class(**lowerCamelCase__ )
a__ : str = add_prefix_space
a__ : Tuple = "post_processor"
a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a__ : Tuple = tuple(state["sep"] )
if "cls" in state:
a__ : str = tuple(state["cls"] )
a__ : str = False
if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : str = add_prefix_space
a__ : Any = True
if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets:
a__ : int = trim_offsets
a__ : Dict = True
if changes_to_apply:
a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) )
a__ : str = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
def _UpperCamelCase( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ):
a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
a__ : List[str] = value
def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ):
a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ):
a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ):
a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
a__ : Tuple = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
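# Illustrative check (added for clarity): RoBERTa wraps sequence pairs as
# `<s> A </s></s> B </s>`, which is exactly what build_inputs_with_special_tokens
# above produces. bos_token_id=0 and eos_token_id=2 are the roberta-base values.
def _demo_roberta_special_tokens(token_ids_a: list, token_ids_b: list, bos: int = 0, eos: int = 2) -> list:
    return [bos] + token_ids_a + [eos] + [eos] + token_ids_b + [eos]

assert _demo_roberta_special_tokens([10, 11], [12]) == [0, 10, 11, 2, 2, 12, 2]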
| 37 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowercase : List[str] = logging.get_logger(__name__)
lowercase : List[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase : Union[str, Any] = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowercase : Optional[int] = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
lowercase : List[str] = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class a__ ( A__ ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = RealmTokenizer
def __init__( self : str , A_ : str=None , A_ : int=None , A_ : Any=True , A_ : Any="[UNK]" , A_ : Optional[int]="[SEP]" , A_ : Optional[Any]="[PAD]" , A_ : Optional[int]="[CLS]" , A_ : Tuple="[MASK]" , A_ : int=True , A_ : Optional[int]=None , **A_ : str , ) -> str:
"""simple docstring"""
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
lowerCamelCase_: Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase__ ) != tokenize_chinese_chars
):
lowerCamelCase_: Optional[Any] = getattr(lowerCamelCase__ , normalizer_state.pop("""type""" ) )
lowerCamelCase_: List[str] = do_lower_case
lowerCamelCase_: List[Any] = strip_accents
lowerCamelCase_: Optional[Any] = tokenize_chinese_chars
lowerCamelCase_: Optional[int] = normalizer_class(**lowerCamelCase__ )
lowerCamelCase_: Union[str, Any] = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts, always padding to ``max_length``."""
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
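# A minimal usage sketch of `batch_encode_candidates` (kept as comments; the
# class name is as reconstructed above and the inputs are illustrative):
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   # one list of candidate strings per question, each call padded to max_length
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"], ["The cucumber", "The Hitchhiker's Guide"]],
#       max_length=10,
#       return_tensors="pt",
#   )
#   # batch["input_ids"] has shape (num_questions, num_candidates, max_length)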
| 423 |
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale every value into [0, 1] (min-max normalization)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: rescale to zero mean and unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
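# A small sanity-check sketch for the two helpers above (illustrative data):
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0, 10.0]
    # min-max normalization maps the smallest value to 0.0 and the largest to 1.0
    print(normalization(sample))  # [0.0, 0.25, 0.5, 0.75, 1.0]
    # z-score standardization yields mean 0 and (sample) standard deviation 1
    print(standardization(sample))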
| 37 | 0 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url: str):
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name: str, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict: dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors;
            # the destination key template below is reconstructed from the rename rules above
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
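# Example invocation (the script file name and the output path are illustrative):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64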
| 53 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
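
# Sanity check against the Project Euler 116 example: a 5-unit row admits 7
# tilings with red (2-unit) tiles, 3 with green (3-unit) tiles and 2 with blue
# (4-unit) tiles, so solution(5) should return 12.
assert solution(5) == 12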
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
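
# A minimal inference sketch for the classes this module exports (kept as
# comments since this is an __init__ module; the repo id is the public CIDAS
# release, and `image` is a PIL.Image you load yourself):
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat", "a remote"], images=[image] * 2,
#                      padding=True, return_tensors="pt")
#   outputs = model(**inputs)  # outputs.logits: one low-resolution mask per prompt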
| 130 |
| 37 | 0 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace):
    """Factory used by the CLI to build a ConvertCommand from parsed arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to the HuggingFace Datasets folder.""" )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove_references = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove_references) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 644 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        # append instead of replacing for every batch after the first
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch

        return written
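
# A minimal end-to-end sketch of the writer above (class name as reconstructed;
# the dataset content is illustrative):
#
#   import sqlite3
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   con = sqlite3.connect("data.db")
#   written = SqlDatasetWriter(ds, "my_table", con, batch_size=1000).write()
#   # `written` is the number of rows inserted, batched via df.to_sql(...)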
| 37 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # unwrap any decorators until we reach the original forward
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
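
# Usage sketch for the `patch_environment` context manager above: variable
# names are upper-cased on entry and removed again on exit.
#
#   with patch_environment(master_addr="127.0.0.1", master_port=29501):
#       assert os.environ["MASTER_ADDR"] == "127.0.0.1"
#   # outside the block MASTER_ADDR is gone again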
| 506 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 37 | 0 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change of `equation` over [a, b] guarantees a root
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
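
# Both calls bracket the positive root of 10 - x*x, so each prints a value
# within about 0.01 of sqrt(10) ~= 3.1623.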
| 131 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like low-rank adapter - for testing only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer (attribute names kept as in the rest of this file)
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
def _UpperCamelCase( self : List[Any] ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : List[Any] ):
a__ : str = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) )
a__ : Optional[Any] = config.to_dict()
a__ : int = config.to_diff_dict()
a__ : List[str] = config.to_json_string()
def _UpperCamelCase( self : int ):
from bitsandbytes.nn import Paramsabit
a__ : List[Any] = self.model_fpaa.get_memory_footprint()
a__ : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a__ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCamelCase( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCamelCase( self : str ):
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[Any] = BitsAndBytesConfig()
a__ : Optional[int] = True
a__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" )
a__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : Dict ):
with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : int = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase__ ):
a__ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _UpperCamelCase( self : int ):
with self.assertRaises(lowerCamelCase__ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a__ : int = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Any = self.model_fpaa.to(torch.floataa )
a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.to("cpu" )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
a__ : Dict = self.model_fpaa.float()
def _UpperCamelCase( self : Dict ):
a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"
def _UpperCamelCase( self : Optional[int] ):
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Optional[int] ):
from transformers import TaForConditionalGeneration
a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
a__ : Optional[Any] = None
# test with `t5-small`
a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Any = model.generate(**lowerCamelCase__ )
a__ : Union[str, Any] = modules
def _UpperCamelCase( self : List[Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Optional[int] = model.generate(**lowerCamelCase__ )
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )
def _UpperCamelCase( self : List[Any] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Union[str, Any] ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4bitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
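
# A minimal 4-bit loading sketch mirroring what these tests exercise (model id
# illustrative; requires `bitsandbytes` and a CUDA device):
#
#   from transformers import AutoModelForCausalLM
#
#   model_4bit = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-560m", load_in_4bit=True, device_map="auto"
#   )
#   # linear weights are now packed 4-bit parameters, giving the memory-footprint
#   # ratio the tests assert via EXPECTED_RELATIVE_DIFFERENCE above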
| 37 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Solve any one of voltage, current or power given the other two (P = V * I);
    exactly one of the three arguments must be 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
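
# Example: with voltage unknown, electric_power(voltage=0, current=2, power=4)
# returns result(name='voltage', value=2.0).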
| 77 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3,
                 is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
                 num_labels=3, scope=None, out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _UpperCamelCase( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase( self : Dict ):
pass
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def _UpperCamelCase( self : str ):
a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = model_class(lowerCamelCase__ )
a__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
if not self.model_tester.is_training:
return
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]:
continue
a__ : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : Tuple = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : Tuple ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ : List[Any] = False
a__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
a__ : Optional[Any] = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : int = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : List[str] ):
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Dict = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
a__ : str = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _UpperCamelCase( self : Optional[int] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
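    # Usage note (illustrative sketch, not part of the original test file):
    # post_process_semantic_segmentation upsamples the (1, 150, 160, 160) logits to
    # each requested target size and returns one per-pixel class map per image:
    #
    #     maps = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
    #     maps[0].shape  # torch.Size([500, 300]); without target_sizes, the raw (160, 160) grid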
| 37 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
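    # Illustrative sketch (not part of the original test): with the checkpoints above,
    # the default stage selection differs only in representation, not in meaning:
    #
    #     timm_model = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
    #     timm_model.out_indices          # (-1,) -> last stage only
    #     transformers_model.out_indices  # [len(stage_names) - 1], i.e. the same stage
    #
    # so downstream code can treat either backbone interchangeably.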
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict) | 498 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
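# Worked example (illustrative, not part of the original module): a PyTorch Conv2d
# weight of shape (out_ch, in_ch, kh, kw) maps to a Flax kernel of shape
# (kh, kw, in_ch, out_ch):
#
#     import numpy as np
#     pt_kernel = np.zeros((64, 3, 7, 7))            # PyTorch layout
#     flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # (7, 7, 3, 64), Flax layout
#
# and the key ("conv", "weight") is renamed to ("conv", "kernel").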
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
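# Minimal usage sketch (illustrative, not part of the original module); assumes a
# Flax model exposing `init_weights`, as transformers/diffusers Flax models do:
#
#     pt_state_dict = pt_model.state_dict()
#     flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)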
| 37 | 0 |
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
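# Example (illustrative): the cross product of two parallel vectors is the zero
# vector, and rounding guards against float noise:
#
#     get_3d_vectors_cross((1.0, 2.0, 3.0), (2.0, 4.0, 6.0))  # -> (0.0, 0.0, 0.0)
#     is_zero_vector((0.0, 1e-12, 0.0), accuracy=10)          # -> True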
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy) | 523 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
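# Example invocation (sketch; the exact flag set comes from TensorFlowBenchmarkArguments):
#
#     python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128
#
# Deprecated `--no_<flag>` options are turned into explicit `--no-<flag>` hints in the
# raised error instead of failing with an opaque parser message.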
if __name__ == "__main__":
main()
| 37 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__lowerCAmelCase : int = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
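# The loaders above yield batches from a noisy line y = a*x + b (illustrative sketch):
#
#     train_dl, valid_dl = dummy_dataloaders(a=2, b=3)
#     x, y = next(iter(train_dl))   # x.shape == y.shape == torch.Size([16, 1])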
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )

            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()

            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)

            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
@require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 262 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
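# Example (illustrative; assumes utils_rag.exact_match_score normalizes case as the
# SQuAD metric does): the best-matching gold answer wins,
#
#     metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Lyon"])  # -> 1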
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f"F1: {fa:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 37 | 0 |
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
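# Worked example: for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the subsets
# summing to 9 are [3, 4, 2] and [4, 5], so this prints "[3, 4, 2] [4, 5]".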
| 208 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
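# Sketch of the expected input layout (illustrative, not from the original file):
# each CSV needs a header row; with columns ("sentence", "label") and
# label_column_id pointing at the "label" column, get_tfds tokenizes "sentence"
# and maps each distinct label value to an integer id.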
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)
return results
if __name__ == "__main__":
main()
| 37 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 5_1_2,
"""google/electra-base-generator""": 5_1_2,
"""google/electra-large-generator""": 5_1_2,
"""google/electra-small-discriminator""": 5_1_2,
"""google/electra-base-discriminator""": 5_1_2,
"""google/electra-large-discriminator""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
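    # Example (illustrative): for a pair (A, B) the returned mask marks which
    # sequence each position belongs to:
    #
    #     [CLS] A A [SEP] B B [SEP]
    #       0   0 0   0   1 1   1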
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 423 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold , a_pred):
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
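# Illustrative sketch (not part of the original script): a worked token-level F1.
#
#   gold = "the cat sat"      -> tokens ["cat", "sat"] after article removal
#   pred = "a cat sat down"   -> tokens ["cat", "sat", "down"]
#   num_same = 2, precision = 2/3, recall = 2/2 = 1.0
#   F1 = 2 * (2/3) * 1.0 / ((2/3) + 1.0) = 0.8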
def get_raw_scores(dataset , preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores , na_probs , qid_to_has_ans , na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores , fa_scores , qid_list=None):
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values() ) / total),
                ("f1", 100.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ] )
def merge_eval(main_eval , new_eval , prefix):
    for k in new_eval:
        main_eval[f'''{prefix}_{k}'''] = new_eval[k]
def plot_pr_curve(precisions , recalls , out_image , title):
    plt.step(recalls , precisions , color="b" , alpha=0.2 , where="post" )
    plt.fill_between(recalls , precisions , step="post" , alpha=0.2 , color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None):
    qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(main_eval , pr_exact , "pr_exact" )
    merge_eval(main_eval , pr_fa , "pr_f1" )
    merge_eval(main_eval , pr_oracle , "pr_oracle" )
def histogram_na_prob(na_probs , qid_list , image_dir , name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(f'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(image_dir , f'''na_prob_hist_{name}.png''' ) )
    plt.clf()
def find_best_thresh(preds , scores , na_probs , qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
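# Illustrative sketch (not part of the original script): the sweep above walks
# questions in order of increasing no-answer probability. It starts from the score
# of predicting "" everywhere (num_no_ans correct abstentions); each step then
# "un-abstains" on one more question: answerable questions add their EM/F1 score,
# wrong non-empty predictions on unanswerable questions subtract 1. E.g. with
# na_probs {"q1": 0.1, "q2": 0.9}, scores {"q1": 1, "q2": 0}, q1 answerable, q2 not,
# and a non-empty prediction for q2, the best threshold lands at 0.1: answer q1,
# abstain on q2.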
def find_all_best_thresh(main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans):
    best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa , fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
    dataset = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw , fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , "HasAns" )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , "NoAns" )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , "hasAns" )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , "noAns" )
    if OPTS.out_file:
        with open(OPTS.out_file , "w" ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
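# Illustrative usage sketch (not part of the original script); the script and data
# file names are placeholders. The arguments mirror parse_args() above:
#
#   python evaluate_squad_v2.py data.json pred.json \
#       --na-prob-file na_prob.json --na-prob-thresh 1.0 \
#       --out-file eval.json --out-image-dir out_images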
| 37 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester( ConfigTester ):
    """simple docstring"""
    def create_and_test_config_common_properties( self : Union[str, Any] ) -> int:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config , 'num_attention_heads' ) )
class LevitModelTester:
    """simple docstring"""
    def __init__( self : int , parent : Union[str, Any] , batch_size : Optional[Any]=1_3 , image_size : Dict=6_4 , num_channels : List[str]=3 , kernel_size : List[str]=3 , stride : Union[str, Any]=2 , padding : int=1 , patch_size : Union[str, Any]=1_6 , hidden_sizes : List[str]=[1_2_8, 2_5_6, 3_8_4] , num_attention_heads : Optional[int]=[4, 6, 8] , depths : Optional[int]=[2, 3, 4] , key_dim : Tuple=[1_6, 1_6, 1_6] , drop_path_rate : Optional[int]=0 , mlp_ratio : Optional[Any]=[2, 2, 2] , attention_ratio : int=[2, 2, 2] , initializer_range : Optional[int]=0.02 , is_training : Union[str, Any]=True , use_labels : List[str]=True , num_labels : Optional[Any]=2 , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self : Dict ) -> Tuple:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : Union[str, Any] ) -> Union[str, Any]:
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model( self : Any , config : Optional[Any] , pixel_values : Optional[Any] , labels : List[str] ) -> Any:
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
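    # Illustrative note (not part of the original test): with the tester defaults
    # above (image_size=64, kernel_size=3, stride=2, padding=1), each of the four
    # conv-stem stages maps H -> floor((H + 2*1 - 3) / 2) + 1, so 64 -> 32 -> 16
    # -> 8 -> 4. The two Subsample stages then reduce the 4x4 patch grid to 1x1,
    # which is exactly ceil(4/4) * ceil(4/4) = 1 token of width hidden_sizes[-1].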
    def create_and_check_for_image_classification( self : Any , config : Union[str, Any] , pixel_values : Optional[Any] , labels : List[str] ) -> str:
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self : str ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": LevitModel,
            """image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self : Optional[int] ) -> Union[str, Any]:
        self.model_tester = LevitModelTester(self )
        self.config_tester = LevitConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self : Optional[int] ) -> Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : Dict ) -> Union[str, Any]:
        return
    @unittest.skip(reason='Levit does not use inputs_embeds' )
    def test_inputs_embeds( self : Optional[int] ) -> Optional[int]:
        pass
    @unittest.skip(reason='Levit does not support input and output embeddings' )
    def test_model_common_attributes( self : Union[str, Any] ) -> List[Any]:
        pass
    @unittest.skip(reason='Levit does not output attentions' )
    def test_attention_outputs( self : Optional[int] ) -> Any:
        pass
    def test_forward_signature( self : str ) -> List[str]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self : Dict ) -> Optional[Any]:
        def check_hidden_states_output(inputs_dict : List[Any] , config : Union[str, Any] , model_class : Optional[int] ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
            for _ in range(4 ):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_outputs_equivalence( self : Optional[int] ) -> List[str]:
        pass
    def _prepare_for_class( self : Optional[int] , inputs_dict : Optional[int] , model_class : str , return_labels : List[str]=False ) -> Tuple:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model( self : List[str] ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self : str ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_training( self : Union[str, Any] ) -> Optional[Any]:
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self : str ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self : Any ) -> int:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
                    inputs["labels"] = inputs["labels"].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()
@slow
    def test_model_from_pretrained( self : int ) -> str:
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self : Optional[Any] ) -> List[str]:
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head( self : List[Any] ) -> Optional[int]:
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 53 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self : List[Any] ):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self : Dict , **kwargs : int ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self : Union[str, Any] , **kwargs : Optional[int] ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : Dict , tokenizer : Optional[Any] ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self : List[str] ):
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
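    # Illustrative note (not part of the original test): with the setUp vocabulary,
    # "lower" splits into "lo" (id 10), "w" (id 2) and "er</w>" (id 16), because only
    # the merges "l o" and "e r</w>" can apply ("lo w</w>" needs a word-final "w").
    # "newer" likewise becomes "n" (9), "e" (3), "w" (2), "er</w>" (16), and the
    # appended "<unk>" maps to id 20 -- matching input_bpe_tokens above.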
@require_ftfy
    def test_check_encoding_slow_fast( self : Optional[Any] ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text )
                text_tokenized_r = tokenizer_r.tokenize(text )
                self.assertListEqual(text_tokenized_s , text_tokenized_r )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text )
                text_tokenized_r = tokenizer_r.tokenize(text )
                self.assertListEqual(text_tokenized_s , text_tokenized_r )
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009", # (horizontal tab, '\t')
                    "\u000B", # (vertical tab)
                    "\u000C", # (form feed)
                    "\u0020", # (space, ' ')
                    "\u200E", # (left-to-right mark)
                    "\u200F", # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq )
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(text_tokenized_s , text_tokenized_r )
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A", # (line feed, '\n')
                    "\r\n", # (carriage return and line feed, '\r\n')
                    "\u000D", # (carriage return, '\r')
                    "\r", # (carriage return, '\r')
                    "\u000D", # (carriage return, '\r')
                    "\u2028", # (line separator)
                    "\u2029", # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq )
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(text_tokenized_s , text_tokenized_r )
    def test_offsets_mapping_with_different_add_prefix_space_argument( self : Optional[Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f''' {text}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
    def test_log_warning( self : int ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(BaseException ) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format." ) )
    @require_ftfy
    def test_tokenization_python_rust_equals( self : int ):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case( self : str ):
        # CLIP always lower cases letters
        pass
| 37 | 0 |
from __future__ import annotations
def is_palindrome(n ) -> bool:
    '''simple docstring'''
    s = str(n )
    return s == s[::-1]
def solution(limit: int = 100_0000 ) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('''b''' )[1] ):
            total += i
    return total
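# Illustrative check (not part of the original file): 585 is palindromic in both
# bases, since str(585) == "585" and bin(585) == "0b1001001001", so
# solution(1000) counts it among the numbers below 1000 that read the same
# forwards and backwards in base 10 and base 2.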
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 130 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
    """TextEncoderConfig""",
    """ByteTextEncoder""",
    """SubwordTextEncoder""",
    """encoder_config""",
    """maybe_build_from_corpus""",
    """manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand( BaseDatasetsCLICommand ):
    """simple docstring"""
    @staticmethod
    def register_subcommand( parser : ArgumentParser ):
        train_parser = parser.add_parser(
            "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
        train_parser.add_argument(
            "--tfds_path" , type=str , required=True , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
        train_parser.add_argument(
            "--datasets_directory" , type=str , required=True , help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self : List[str] , tfds_path : str , datasets_directory : str , *args : Tuple ):
        self._logger = get_logger("datasets-cli/converting" )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self : int ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file" )
                continue
            with open(input_file , encoding="utf-8" ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger" , "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + "\n" )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                    out_line = "from . import " + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py" , "" )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , "w" , encoding="utf-8" ) as f:
                f.writelines(out_lines )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace(".py" , "" )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
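# Illustrative usage sketch (not part of the original file); paths are placeholders.
# The command is registered on the `datasets-cli` entry point via register_subcommand:
#
#   datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py \
#       --datasets_directory ./hf_datasets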
| 37 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer( BaseTokenizer ):
    def __init__( self : Optional[int] , replacement : str = "▁" , add_prefix_space : bool = True , unk_token : Union[str, AddedToken] = "<unk>" , eos_token : Union[str, AddedToken] = "</s>" , pad_token : Union[str, AddedToken] = "<pad>" , ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=F"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train( self : Dict , files : Union[str, List[str]] , vocab_size : int = 8000 , show_progress : bool = True , ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self : Dict , iterator : Union[Iterator[str], Iterator[Iterator[str]]] , vocab_size : int = 8000 , show_progress : bool = True , ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self : Tuple ):
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
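# Illustrative usage sketch (not part of the original file); "corpus.txt" is a
# placeholder path. Training and then tokenizing with the wrapper defined above:
#
#   tok = SentencePieceUnigramTokenizer()
#   tok.train("corpus.txt", vocab_size=8000)
#   tok.encode("Hello world").tokens   # encode() is inherited from BaseTokenizer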
| 644 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem( AbstractArchiveFileSystem ):
    """simple docstring"""
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__( self : List[str] , fo : str = "" , target_protocol : Optional[str] = None , target_options : Optional[dict] = None , **kwargs : List[str] ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="rb" , protocol=target_protocol , compression=self.compression , client_kwargs={
                "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("::" )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol( cls : int , path : int ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("/" )
    def _get_dirs( self : Dict ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat( self : Tuple , path : str ):
        return self.file.open().read()
    def _open( self : List[str] , path : str , mode : str = "rb" , block_size : int=None , autocommit : List[str]=True , cache_options : List[str]=None , **kwargs : Optional[Any] , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class Bz2FileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = 'bz2'
    compression = 'bz2'
    extension = '.bz2'
class GzipFileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = 'gzip'
    compression = 'gzip'
    extension = '.gz'
class Lz4FileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = 'lz4'
    compression = 'lz4'
    extension = '.lz4'
class XzFileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = 'xz'
    compression = 'xz'
    extension = '.xz'
class ZstdFileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = 'zstd'
    compression = 'zstd'
    extension = '.zst'
def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ):
super().__init__(
fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
a__ : Any = self.file.__enter__
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : str ):
a__ : List[Any] = file_
def __enter__( self : str ):
self._file.__enter__()
return self
def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ):
self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ )
def __iter__( self : List[str] ):
return iter(self._file )
def _UpperCamelCase( self : Any ):
return next(self._file )
def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ):
return getattr(self._file , lowerCamelCase__ )
def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ):
return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) )
a__ : Any = fixed_enter
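# Illustrative usage sketch (not part of the original file); the URL is a
# placeholder and the registration call is only needed if the classes above are
# not registered with fsspec elsewhere. The "protocol::url" chaining syntax is
# the one mentioned in the protocol comment above:
#
#   import fsspec
#   fsspec.register_implementation("gzip", GzipFileSystem)
#   with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", "rb") as f:
#       data = f.read()   # transparently decompressed bytes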
| 37 | 0 |
"""simple docstring"""
from math import pi
def arc_length(radius: float , angle: float ):
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
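# Illustrative check (not part of the original file): with radius 90 and a 10°
# angle, arc_length(90, 10) = 2 * pi * 90 * (10 / 360) = 5 * pi ≈ 15.71.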
| 506 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel , ckpt_dir: str , model_name: str ):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name: str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f'''bert/{name}'''
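    # Illustrative trace (not part of the original script): applying var_map in order,
    #   "embeddings.word_embeddings.weight"
    #     -> "embeddings.word_embeddings"   ("word_embeddings.weight" rule)
    #     -> "embeddings/word_embeddings"   ("." -> "/")
    #   giving the TF variable name "bert/embeddings/word_embeddings".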
    def create_tf_var(tensor , name , session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}''' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main(raw_args=None ):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
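# Illustrative usage sketch (not part of the original script); the script name and
# paths are placeholders. The flags mirror the argparse definitions in main():
#
#   python convert_bert_checkpoint.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt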
| 37 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs, cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
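# Illustrative check (not part of the original file): get_pairs expects a tuple of
# symbols, e.g. get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the candidate BPE merges.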
class LEDTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_ , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , **snake_case_ , ) -> str:
_a = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
_a = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
_a = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
_a = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
_a = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
_a = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding="utf-8" ) as vocab_handle:
_a = json.load(lowerCamelCase__ )
_a = {v: k for k, v in self.encoder.items()}
_a = errors # how to handle errors in decoding
_a = bytes_to_unicode()
_a = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding="utf-8" ) as merges_handle:
_a = merges_handle.read().split("\n" )[1:-1]
_a = [tuple(merge.split() ) for merge in bpe_merges]
_a = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_a = {}
_a = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_a = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ) -> Union[str, Any]:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ) -> Any:
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ) -> List[Any]:
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> str:
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ) -> List[Any]:
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Union[str, Any]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> Optional[Any]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> Optional[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[str]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ) -> Optional[Any]:
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ) -> Tuple:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 131 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class A__ :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
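        # Worked example with the defaults above (added for clarity):
        # frequency_out_dimension = (16 - 2) // 2 + 1 = 8 and
        # time_out_dimension = (24 - 2) // 2 + 1 = 12, so
        # num_patches = 8 * 12 = 96 and seq_length = 96 + 2 = 98.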
    def prepare_config_and_inputs( self ):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp( self ):
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_feature_extractor( self ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
    def test_inference_audio_classification( self ):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(torch_device )
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 37 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 77 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 1_008 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
    def test_full_tokenizer( self ):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer( self ):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
    def test_picklable_without_disk( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="facebook/xglm-564M" , padding=False , )
| 37 | 0 |
"""simple docstring"""
from __future__ import annotations
def peak(lst: list[int] ) -> int:
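    """
    Return a peak of a unimodal list in O(log n) time. (Docstring and examples
    added; the values are illustrative so that `doctest.testmod()` below has
    something to verify.)

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6])
    10
    """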
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod() | 498 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return image
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
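# Note (added): the checkpoint stores separate q and v biases and no k bias, because the
# vision encoder's key projection is bias-free; the zeros spliced in above stand in for the
# missing k bias so the fused qkv bias lines up as [q | k | v].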
def get_blipa_config(model_name ):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict()
    else:
        raise ValueError("Model name not supported" )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
    config = InstructBlipConfig(vision_config=vision_config , text_config=text_config , qformer_config=qformer_config )
    return config, image_size
@torch.no_grad()
def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int:
a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
a__, a__ : List[str] = get_blipa_config(__a )
a__ : Any = InstructBlipForConditionalGeneration(__a ).eval()
a__ : Dict = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
a__, a__ : Dict = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu"
a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu"
a__, a__, a__ : Tuple = load_model_and_preprocess(
name=__a , model_type=__a , is_eval=__a , device=__a )
original_model.eval()
print("Done!" )
# update state dict keys
a__ : Dict = original_model.state_dict()
a__ : Optional[int] = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a__ : Optional[int] = state_dict.pop(__a )
if key.startswith("Qformer.bert" ):
a__ : List[Any] = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
a__ : Any = key.replace("self" , "attention" )
if "llm_proj" in key:
a__ : Dict = key.replace("llm_proj" , "language_projection" )
if "t5_proj" in key:
a__ : int = key.replace("t5_proj" , "language_projection" )
if key.startswith("llm_model" ):
a__ : List[str] = key.replace("llm_model" , "language_model" )
if key.startswith("t5" ):
a__ : str = key.replace("t5" , "language" )
a__ : Dict = val
# read in qv biases
read_in_q_v_bias(__a , __a )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__a , strict=__a )
a__ : Union[str, Any] = load_demo_image()
a__ : int = "What is unusual about this image?"
# create processor
a__ : Any = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a )
a__ : Tuple = InstructBlipProcessor(
image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , )
a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a )
# make sure processor creates exact same pixel values
a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a )
a__ : Optional[Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a )
original_model.to(__a )
hf_model.to(__a )
with torch.no_grad():
if "vicuna" in model_name:
a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
a__ : List[str] = hf_model(**__a ).logits
else:
a__ : List[Any] = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a )
a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
a__ : Any = hf_model(**__a , labels=__a ).logits
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a )
print("Looks ok!" )
print("Generating with original model..." )
a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
a__ : int = hf_model.generate(
**__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
a__ : int = 2
print("Original generation:" , __a )
a__ : str = processor.batch_decode(__a , skip_special_tokens=__a )
a__ : str = [text.strip() for text in output_text]
print("HF generation:" , __a )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__a )
hf_model.save_pretrained(__a )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ = TypeVar('T')
class DisjointSetTreeNode(Generic[T] ):
    """simple docstring"""
    def __init__( self , data ):
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T] ):
    """simple docstring"""
    def __init__( self ):
        self.map: dict[T, DisjointSetTreeNode[T]] = {}
    def make_set( self , data ):
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self , data ):
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self , node1 , node2 ):
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union( self , data1 , data2 ):
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )
class GraphUndirectedWeighted(Generic[T] ):
    """simple docstring"""
    def __init__( self ):
        self.connections: dict[T, dict[T, int]] = {}
    def add_node( self , node ):
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self , node1 , node2 , weight ):
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal( self ):
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
return graph | 523 |
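# Added usage sketch for the Kruskal implementation above (illustrative values):
#
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 10)
#   mst = g.kruskal()
#   sorted(mst.connections)  # -> [1, 2, 3]; the heavy edge (1, 3) is not in the MST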
def binomial_coefficient(n , r ) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
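# Expected output (comment added): 252, since C(10, 5) = 10! / (5! * 5!) = 252.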
| 37 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase__ ) , 1_0_5_4 )
def snake_case__ ( self : Union[str, Any] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
    def test_full_tokenizer( self ):
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
# fmt: off
__UpperCAmelCase = {"input_ids": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(lowerCamelCase__ )
__UpperCAmelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
__UpperCAmelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
__UpperCAmelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
__UpperCAmelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
__UpperCAmelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
__UpperCAmelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest( unittest.TestCase ):
    checkpoint_name = '''facebook/mbart-large-50-one-to-many-mmt'''
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
    @classmethod
    def setUpClass( cls ):
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        cls.pad_token_id = 1
        return cls
def snake_case__ ( self : Dict ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 2_5_0_0_3_8 )
def snake_case__ ( self : int ) -> Optional[int]:
__UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
def snake_case__ ( self : Optional[Any] ) -> str:
self.assertIn(lowerCamelCase__ , self.tokenizer.all_special_ids )
__UpperCAmelCase = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
__UpperCAmelCase = self.tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase__ )
def snake_case__ ( self : str ) -> Optional[int]:
__UpperCAmelCase = ["this is gunna be a long sentence " * 2_0]
assert isinstance(src_text[0] , lowerCamelCase__ )
__UpperCAmelCase = 1_0
__UpperCAmelCase = self.tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self : Tuple ) -> List[Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def snake_case__ ( self : str ) -> List[str]:
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
__UpperCAmelCase = MBartaaTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase__ )
@require_torch
def snake_case__ ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , return_tensors='''pt''' )
__UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def snake_case__ ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
__UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def snake_case__ ( self : List[Any] ) -> Dict:
__UpperCAmelCase = self.tokenizer(self.src_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=3 , return_tensors='''pt''' )
__UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=1_0 , return_tensors='''pt''' )
__UpperCAmelCase = targets["input_ids"]
__UpperCAmelCase = shift_tokens_right(lowerCamelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def snake_case__ ( self : Optional[Any] ) -> Any:
__UpperCAmelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
| 262 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6384,
}
class LEDTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 37 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp: int ) -> bool:
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x20000 and cp <= 0x2a6df) #
or (cp >= 0x2a700 and cp <= 0x2b73f) #
or (cp >= 0x2b740 and cp <= 0x2b81f) #
or (cp >= 0x2b820 and cp <= 0x2ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2f800 and cp <= 0x2fa1f) #
): #
return True
return False
def is_chinese(word: str ) -> int:
    """simple docstring"""
    for char in word:
        char_code = ord(char )
        if not _is_chinese_char(char_code ):
            return 0
    return 1
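# Example (added; illustrative): is_chinese("中国") -> 1 and is_chinese("abc") -> 0;
# mixed strings such as "中a" also return 0, since every character must be a CJK character.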
def get_chinese_word(tokens: List[str] ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens: List[str] , chinese_word_set: set ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            window = min(end - start , max_word_len )
            for i in range(window , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
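# Worked example (added; hypothetical tokens): with bert_word = ["中", "国", "人"] and
# chinese_word_set = {"中国"}, the loop rewrites the list to ["中", "##国", "人"], so the
# downstream whole-word-masking code can tell that "国" continues the word "中国".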
def prepare_ref(lines: List[str] , ltp_tokenizer: LTP , bert_tokenizer: BertTokenizer ):
    """simple docstring"""
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["""input_ids"""] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args ):
    """simple docstring"""
    with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(lines , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
| 208 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RoBERTa tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
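# Usage sketch (illustrative, using the standard Hub checkpoints listed in the map above):
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tokenizer("Hello world")
# Single sequences are wrapped as <s> ... </s>, i.e. bos_token_id ... eos_token_id.

# Pretokenized input requires add_prefix_space=True, per the assertion in _encode_plus.
tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
enc = tokenizer(["Hello", "world"], is_split_into_words=True)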
| 37 | 0 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check whether the 3-digit substrings of the pandigital number have the divisibility properties."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-to-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 423 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit standard deviation (z-score)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
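# For example (rounded to the default 3 decimals):
data = [2.0, 4.0, 6.0, 8.0, 10.0]
print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
print(standardization(data))  # zero mean, unit (sample) standard deviation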
| 37 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel's weights as a TF1-style checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model')
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
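# A typical invocation (paths are placeholders; since `main` accepts raw args,
# the CLI flags above can also be driven programmatically):
main(
    [
        "--model_name", "bert-base-uncased",
        "--pytorch_model_path", "/path/to/pytorch_model.bin",
        "--tf_cache_dir", "/path/to/tf_ckpt",
    ]
)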
| 53 |
def solution(length: int = 50) -> int:
    """
    Project Euler 116: count the ways a row of `length` black tiles can have some
    tiles replaced by coloured tiles of length 2, 3 or 4 (one colour per row,
    at least one coloured tile), summed over the three colours.
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 37 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''')}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''examples''': get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''examples''': get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def __SCREAMING_SNAKE_CASE() -> int:  # noqa: E501 -- kept simple below
    '''unused placeholder removed'''


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='''DirectRunner''')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', F"""{builder.name}-train.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({'''content''': datasets.Value('''string''')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, expected_num_examples)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, expected_num_examples)
            self.assertDictEqual(dset['''train'''][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', '''dataset_info.json''')))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='''DirectRunner''')
            with patch('''apache_beam.io.parquetio.WriteToParquet''') as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, '''default''', '''0.0.0''', F"""{builder.name}-train-00000-of-00002.arrow""")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, '''default''', '''0.0.0''', F"""{builder.name}-train-00001-of-00002.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({'''content''': datasets.Value('''string''')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, expected_num_examples)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content''']), sorted(['''foo''', '''bar''', '''foobar''']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', '''dataset_info.json''')))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='''DirectRunner''')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', F"""{builder.name}-train.arrow""")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, expected_num_examples)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, expected_num_examples)
            self.assertDictEqual(dset['''train'''][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', '''dataset_info.json''')))
            del dset
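# A minimal sketch of exercising such a builder outside the test suite
# (assumes `apache-beam` is installed; the DirectRunner runs the pipeline in-process):
import tempfile

with tempfile.TemporaryDirectory() as cache_dir:
    builder = DummyBeamDataset(cache_dir=cache_dir, beam_runner="DirectRunner")
    builder.download_and_prepare()
    ds = builder.as_dataset()["train"]
    print(ds[0])  # {'content': 'foo'}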
| 130 |
class Things:
    def __init__(self, name: str, value: float, weight: float):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self) -> str:
        return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value(self) -> float:
        return self.value

    def get_name(self) -> str:
        return self.name

    def get_weight(self) -> float:
        return self.weight

    def value_weight(self) -> float:
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    # The upstream version carries a doctest here; kept as a stub.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
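# Example: greedily fill a 500-unit budget by raw value (data illustrative):
food = ["Burger", "Pizza", "Coca Cola", "Rice", "Sambhar", "Chicken", "Fries", "Milk"]
value = [80, 100, 60, 70, 50, 110, 90, 60]
weight = [40, 60, 40, 70, 100, 85, 55, 70]

foods = build_menu(food, value, weight)
chosen, total_value = greedy(foods, 500.0, Things.get_value)
print(chosen, total_value)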
| 37 | 0 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            """tokens""": datasets.Sequence(datasets.Value("""string""")),
            """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""])),
            """answers""": datasets.Sequence(
                {
                    """text""": datasets.Value("""string"""),
                    """answer_start""": datasets.Value("""int32"""),
                }
            ),
            """id""": datasets.Value("""int64"""),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            """tokens""": [["""foo"""] * 5] * n,
            """labels""": [[1] * 5] * n,
            """answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
            """id""": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("""data""") / """file.arrow""")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
__lowerCAmelCase : Tuple = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, """w""") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import bza
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "file.txt.bz2"
lowerCAmelCase__ = bytes(__a , """utf-8""" )
with bza.open(__a , """wb""" ) as f:
f.write(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import gzip
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
lowerCAmelCase__ = bytes(__a , """utf-8""" )
with gzip.open(__a , """wb""" ) as f:
f.write(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "file.txt.lz4"
lowerCAmelCase__ = bytes(__a , """utf-8""" )
with lza.frame.open(__a , """wb""" ) as f:
f.write(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "file.txt.7z"
with pyazr.SevenZipFile(__a , """w""" ) as archive:
archive.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
import tarfile
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "file.txt.tar"
with tarfile.TarFile(__a , """w""" ) as f:
f.add(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import lzma
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "file.txt.xz"
lowerCAmelCase__ = bytes(__a , """utf-8""" )
with lzma.open(__a , """wb""" ) as f:
f.write(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
import zipfile
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "file.txt.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "file.txt.zst"
lowerCAmelCase__ = bytes(__a , """utf-8""" )
with zstd.open(__a , """wb""" ) as f:
f.write(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "file.xml"
lowerCAmelCase__ = textwrap.dedent(
"""\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>""" )
with open(__a , """w""" ) as f:
f.write(__a )
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("""data""") / """dataset.arrow""")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("""data""") / """dataset.sqlite""")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""")
        for item in DATA:
            cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("""data""") / """dataset.csv""")
    with open(path, """w""", newline="""""") as f:
        writer = csv.DictWriter(f, fieldnames=["""col_1""", """col_2""", """col_3"""])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("""data""") / """dataset2.csv""")
    with open(path, """w""", newline="""""") as f:
        writer = csv.DictWriter(f, fieldnames=["""col_1""", """col_2""", """col_3"""])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
import bza
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.csv.bz2"
with open(__a , """rb""" ) as f:
lowerCAmelCase__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__a , """wb""" ) as f:
f.write(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.csv.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
f.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.csv.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__a , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.join("""main_dir""" , os.path.basename(__a ) ) )
f.write(__a , arcname=os.path.join("""main_dir""" , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
lowerCAmelCase__ = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__a , """wb""" ) as f:
lowerCAmelCase__ = pq.ParquetWriter(__a , schema=__a )
lowerCAmelCase__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__a ) )] for k in DATA[0]} , schema=__a )
writer.write_table(__a )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase__ = {"data": DATA}
with open(__a , """w""" ) as f:
json.dump(__a , __a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase__ = {"data": DATA_DICT_OF_LISTS}
with open(__a , """w""" ) as f:
json.dump(__a , __a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__a , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__a ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__a , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__a ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__a , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__a ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__a , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__a ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
import gzip
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__a , """rb""" ) as orig_file:
with gzip.open(__a , """wb""" ) as zipped_file:
zipped_file.writelines(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
import gzip
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__a , """rb""" ) as orig_file:
with gzip.open(__a , """wb""" ) as zipped_file:
zipped_file.writelines(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
f.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.join("""nested""" , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.join("""main_dir""" , os.path.basename(__a ) ) )
f.write(__a , arcname=os.path.join("""main_dir""" , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.jsonl.tar"
with tarfile.TarFile(__a , """w""" ) as f:
f.add(__a , arcname=os.path.basename(__a ) )
f.add(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(__a , """w""" ) as f:
f.add(__a , arcname=os.path.join("""nested""" , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ["0", "1", "2", "3"]
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__a , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ["0", "1", "2", "3"]
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__a , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ["0", "1", "2", "3"]
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.abc"
with open(__a , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.text.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
f.write(__a , arcname=os.path.basename(__a ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.join("""main_dir""" , os.path.basename(__a ) ) )
f.write(__a , arcname=os.path.join("""main_dir""" , os.path.basename(__a ) ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.ext.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__a , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = "\n".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
lowerCAmelCase__ = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(__a )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( ):
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( ):
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data""" ) / "dataset.img.zip"
with zipfile.ZipFile(__a , """w""" ) as f:
f.write(__a , arcname=os.path.basename(__a ) )
f.write(__a , arcname=os.path.basename(__a ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
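# Fixtures defined this way are injected by name into tests. A sketch of a test
# next to this conftest (the test function itself is illustrative):
def test_dataset_fixture(dataset):
    # pytest resolves `dataset` to the session-scoped fixture defined above.
    assert dataset.num_rows == 10
    assert dataset[0]["tokens"] == ["foo"] * 5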
| 644 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                    written += num_rows_batch

        return written
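# These classes back `Dataset.from_sql` / `Dataset.to_sql`; a direct-usage sketch
# against an in-memory SQLite database (table name and columns are illustrative):
import sqlite3

from datasets import Dataset

con = sqlite3.connect(":memory:")
Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]}).to_sql("data", con)

ds = Dataset.from_sql("SELECT text, label FROM data", con)
print(ds[0])  # {'text': 'a', 'label': 0}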
| 37 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _UpperCAmelCase ( A__ ,A__ ):
'''simple docstring'''
a__ =1
@register_to_config
def __init__( self , A=2_0_0_0 , A=0.1 , A=2_0 , A=1E-3 ) -> List[str]:
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Dict = None
_UpperCAmelCase : List[str] = None
def __lowerCAmelCase ( self , A , A = None ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = torch.linspace(1 , self.config.sampling_eps , lowerCamelCase__ , device=lowerCamelCase__ )
def __lowerCAmelCase ( self , A , A , A , A=None ) -> Optional[int]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_UpperCAmelCase : Optional[int] = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_UpperCAmelCase : Optional[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_UpperCAmelCase : Any = std.flatten()
while len(std.shape ) < len(score.shape ):
_UpperCAmelCase : Union[str, Any] = std.unsqueeze(-1 )
_UpperCAmelCase : int = -score / std
# compute
_UpperCAmelCase : List[Any] = -1.0 / len(self.timesteps )
_UpperCAmelCase : List[str] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_UpperCAmelCase : Any = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_UpperCAmelCase : int = beta_t.unsqueeze(-1 )
_UpperCAmelCase : Any = -0.5 * beta_t * x
_UpperCAmelCase : Tuple = torch.sqrt(lowerCamelCase__ )
_UpperCAmelCase : int = drift - diffusion**2 * score
_UpperCAmelCase : int = x + drift * dt
# add noise
_UpperCAmelCase : Optional[int] = randn_tensor(x.shape , layout=x.layout , generator=lowerCamelCase__ , device=x.device , dtype=x.dtype )
_UpperCAmelCase : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> List[str]:
return self.config.num_train_timesteps
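# A schematic reverse-diffusion loop with this scheduler. `model` stands in for a
# score network and is hypothetical; shapes are illustrative only.
import torch

scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(num_inference_steps=100)

x = torch.randn(1, 3, 32, 32)  # start from pure noise
for t in scheduler.timesteps:
    score = model(x, t)  # hypothetical score network call
    x, x_mean = scheduler.step_pred(score, x, t)
# x_mean holds the final denoised estimate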
| 506 |
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year, following Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = """will be""" if year > datetime.now().year else """was"""
        print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 37 | 0 |
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape tensors if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''')

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
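# A sketch of driving the conversion with a diffusers Flax model. The model class
# and the `from_config` call are assumptions for illustration; `init_weights` is
# provided by diffusers' FlaxModelMixin.
from diffusers import FlaxUNet2DConditionModel, UNet2DConditionModel

pt_model = UNet2DConditionModel()  # default config; any matching architecture works
flax_model = FlaxUNet2DConditionModel.from_config(pt_model.config)

flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)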
| 131 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter; used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = 'bigscience/bloom-1b7'

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I')
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n')
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University')
    MAX_NEW_TOKENS = 1_0

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto")
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto")

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=quantization_config, load_in_4bit=True, device_map="auto", bnb_4bit_quant_type="nf4", )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto")
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced")

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Any = "facebook/opt-350m"
super().setUp()
def _UpperCamelCase( self : int ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a__ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a__ : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase__ ) ):
a__ : Dict = LoRALayer(module.q_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.k_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a__ : Optional[Any] = model.forward(**lowerCamelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
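# A minimal, self-contained sketch of the LoRALayer wrapper the adapter test above
# relies on (the real helper is defined elsewhere in the test module). The init
# scheme and the rank below are illustrative assumptions, not the upstream values.
import torch
from torch import nn


class LoRALayer(nn.Module):
    """Wraps a frozen linear projection and adds a trainable low-rank adapter."""

    def __init__(self, module: nn.Linear, rank: int):
        super().__init__()
        self.module = module  # the frozen base projection
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        nn.init.zeros_(self.adapter[1].weight)  # adapter starts as a zero delta

    def forward(self, x, *args, **kwargs):
        return self.module(x, *args, **kwargs) + self.adapter(x)


_out = LoRALayer(nn.Linear(32, 32), rank=4)(torch.randn(2, 32))
print(_out.shape)  # torch.Size([2, 32])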
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'gpt2-xl'
_lowercase = 3.31_91_85_48_54_15_21_87
| 37 | 0 |
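# A minimal sketch of the quantized-loading pattern the tests above exercise.
# It assumes a CUDA GPU plus the `bitsandbytes` package, and the checkpoint name
# is illustrative; `load_in_8bit=True` swaps Linear layers for 8-bit equivalents.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "bigscience/bloom-560m"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, load_in_8bit=True, device_map="auto")

inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))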
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A = random.Random()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=1.0 , UpperCamelCase=None , UpperCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
if rng is None:
__UpperCAmelCase : Optional[Any] = global_rng
__UpperCAmelCase : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class a__ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str=7 , UpperCamelCase_ : Any=400 , UpperCamelCase_ : str=2000 , UpperCamelCase_ : str=10 , UpperCamelCase_ : List[Any]=160 , UpperCamelCase_ : str=8 , UpperCamelCase_ : str=0.0 , UpperCamelCase_ : List[Any]=4000 , UpperCamelCase_ : str=False , UpperCamelCase_ : int=True , ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[Any] = min_seq_length
__UpperCAmelCase : Union[str, Any] = max_seq_length
__UpperCAmelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCAmelCase : List[str] = padding_value
__UpperCAmelCase : Dict = sampling_rate
__UpperCAmelCase : Optional[Any] = return_attention_mask
__UpperCAmelCase : int = do_normalize
__UpperCAmelCase : str = feature_size
__UpperCAmelCase : Any = chunk_length
__UpperCAmelCase : Optional[Any] = hop_length
def a_ ( self : Optional[Any]):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a_ ( self : Dict , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : str=False):
"""simple docstring"""
def _flatten(UpperCamelCase_ : int):
return list(itertools.chain(*lowerCamelCase__))
if equal_length:
__UpperCAmelCase : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
__UpperCAmelCase : List[Any] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
__UpperCAmelCase : Union[str, Any] = [np.asarray(lowerCamelCase__) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a__ ( A__ , unittest.TestCase ):
lowercase_ = WhisperFeatureExtractor if is_speech_available() else None
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = WhisperFeatureExtractionTester(self)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Union[str, Any] = feat_extract_first.save_pretrained(lowerCamelCase__)[0]
check_json_file_has_correct_format(lowerCamelCase__)
__UpperCAmelCase : str = self.feature_extraction_class.from_pretrained(lowerCamelCase__)
__UpperCAmelCase : Any = feat_extract_first.to_dict()
__UpperCAmelCase : int = feat_extract_second.to_dict()
__UpperCAmelCase : int = feat_extract_first.mel_filters
__UpperCAmelCase : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__))
self.assertEqual(lowerCamelCase__ , lowerCamelCase__)
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : int = os.path.join(lowerCamelCase__ , "feat_extract.json")
feat_extract_first.to_json_file(lowerCamelCase__)
__UpperCAmelCase : Tuple = self.feature_extraction_class.from_json_file(lowerCamelCase__)
__UpperCAmelCase : List[str] = feat_extract_first.to_dict()
__UpperCAmelCase : List[str] = feat_extract_second.to_dict()
__UpperCAmelCase : List[Any] = feat_extract_first.mel_filters
__UpperCAmelCase : Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__))
self.assertEqual(lowerCamelCase__ , lowerCamelCase__)
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase : Tuple = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__UpperCAmelCase : List[str] = [np.asarray(lowerCamelCase__) for speech_input in speech_inputs]
# Test feature size
__UpperCAmelCase : Optional[Any] = feature_extractor(lowerCamelCase__ , padding="max_length" , return_tensors="np").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
# Test not batched input
__UpperCAmelCase : int = feature_extractor(speech_inputs[0] , return_tensors="np").input_features
__UpperCAmelCase : int = feature_extractor(np_speech_inputs[0] , return_tensors="np").input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3))
# Test batched
__UpperCAmelCase : int = feature_extractor(lowerCamelCase__ , return_tensors="np").input_features
__UpperCAmelCase : Union[str, Any] = feature_extractor(lowerCamelCase__ , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3))
# Test 2-D numpy arrays are batched.
__UpperCAmelCase : Optional[Any] = [floats_list((1, x))[0] for x in (800, 800, 800)]
__UpperCAmelCase : int = np.asarray(lowerCamelCase__)
__UpperCAmelCase : Dict = feature_extractor(lowerCamelCase__ , return_tensors="np").input_features
__UpperCAmelCase : Optional[int] = feature_extractor(lowerCamelCase__ , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3))
# Test truncation required
__UpperCAmelCase : int = [floats_list((1, x))[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200)]
__UpperCAmelCase : List[str] = [np.asarray(lowerCamelCase__) for speech_input in speech_inputs]
__UpperCAmelCase : Dict = [x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCAmelCase : List[Any] = [np.asarray(lowerCamelCase__) for speech_input in speech_inputs_truncated]
__UpperCAmelCase : List[str] = feature_extractor(lowerCamelCase__ , return_tensors="np").input_features
__UpperCAmelCase : List[Any] = feature_extractor(lowerCamelCase__ , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3))
def a_ ( self : Dict):
"""simple docstring"""
import torch
__UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__UpperCAmelCase : List[Any] = np.random.rand(100 , 32).astype(np.floataa)
__UpperCAmelCase : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCAmelCase : Dict = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.floataa)
__UpperCAmelCase : Tuple = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
def a_ ( self : Dict , UpperCamelCase_ : List[Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
# automatic decoding with librispeech
__UpperCAmelCase : Dict = ds.sort("id").select(range(lowerCamelCase__))[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def a_ ( self : Tuple):
"""simple docstring"""
        # fmt: off
        __UpperCAmelCase : int = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
])
# fmt: on
__UpperCAmelCase : str = self._load_datasamples(1)
__UpperCAmelCase : List[str] = WhisperFeatureExtractor()
__UpperCAmelCase : Optional[int] = feature_extractor(lowerCamelCase__ , return_tensors="pt").input_features
self.assertEqual(input_features.shape , (1, 80, 3000))
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase__ , atol=1e-4))
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__UpperCAmelCase : Dict = self._load_datasamples(1)[0]
__UpperCAmelCase : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCAmelCase : Tuple = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__)[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__) - 1) < 1e-3))
| 77 |
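# A minimal sketch of the feature-extraction path tested above: raw float audio in,
# (1, 80, 3000) log-mel features out. The sine wave is a stand-in for real speech.
import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()  # defaults: 80 mel bins, 30 s (3000-frame) windows
audio = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)  # 1 s @ 16 kHz
features = extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000) - shorter clips are padded out to the 30 s window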
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ):
a__ : Dict = parent
a__ : Dict = 100
a__ : Optional[int] = batch_size
a__ : Union[str, Any] = image_size
a__ : Any = patch_size
a__ : Optional[Any] = num_channels
a__ : int = is_training
a__ : List[str] = use_labels
a__ : Optional[Any] = hidden_size
a__ : List[Any] = num_hidden_layers
a__ : str = num_attention_heads
a__ : str = intermediate_size
a__ : int = hidden_act
a__ : List[Any] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Union[str, Any] = type_sequence_label_size
a__ : Optional[Any] = initializer_range
a__ : List[str] = scope
a__ : int = out_indices
a__ : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Optional[int] = (image_size // patch_size) ** 2
a__ : Union[str, Any] = num_patches + 1
def _UpperCamelCase( self : int ):
a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Optional[Any] = None
a__ : Tuple = None
if self.use_labels:
a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCamelCase( self : Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
a__ : str = BeitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ):
a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ):
a__ : List[str] = self.type_sequence_label_size
a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a__ : Optional[Any] = 1
a__ : List[str] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ):
a__ : int = self.num_labels
a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _UpperCamelCase( self : Optional[int] ):
a__ : Any = self.prepare_config_and_inputs()
a__, a__, a__, a__ : Union[str, Any] = config_and_inputs
a__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowercase = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : Any ):
a__ : int = BeitModelTester(self )
a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _UpperCamelCase( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _UpperCamelCase( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase( self : Dict ):
pass
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def _UpperCamelCase( self : str ):
a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = model_class(lowerCamelCase__ )
a__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
if not self.model_tester.is_training:
return
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]:
continue
a__ : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : Tuple = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : Tuple ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ : List[Any] = False
a__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
a__ : Optional[Any] = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : int = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : List[str] ):
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Dict = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
a__ : str = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _UpperCamelCase( self : Optional[int] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCamelCase_ ( ) -> Any:
a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
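# A minimal stand-in for the fixture-based helper above, for environments where the
# COCO sample image is unavailable: a synthetic RGB image with a comparable shape.
import numpy as np
from PIL import Image


def prepare_synthetic_img(height: int = 480, width: int = 640) -> Image.Image:
    rng = np.random.default_rng(0)
    return Image.fromarray(rng.integers(0, 255, (height, width, 3), dtype=np.uint8))


print(prepare_synthetic_img().size)  # (640, 480)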
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _UpperCamelCase( self : str ):
a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ )
a__ : Optional[Any] = self.default_image_processor
a__ : Dict = prepare_img()
a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ )
# prepare bool_masked_pos
a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ )
a__ : Tuple = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) )
@slow
def _UpperCamelCase( self : Dict ):
a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ )
a__ : int = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Union[str, Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : Any ):
a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
lowerCamelCase__ )
a__ : str = self.default_image_processor
a__ : List[str] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Dict = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Optional[int] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Optional[Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : Tuple = model.to(lowerCamelCase__ )
a__ : List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : Union[str, Any] = Image.open(ds[0]["file"] )
a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Tuple = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
a__ : Dict = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowerCamelCase__ , )
else:
a__ : Dict = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def _UpperCamelCase( self : Tuple ):
a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : List[Any] = model.to(lowerCamelCase__ )
a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : str = Image.open(ds[0]["file"] )
a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : List[Any] = model(**lowerCamelCase__ )
a__ : Any = outputs.logits.detach().cpu()
a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] )
a__ : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
a__ : Any = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
| 37 | 0 |
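# A minimal sketch of the BEiT classification flow the integration tests above verify.
# It downloads weights from the Hub; the random image is a placeholder for a photo.
import numpy as np
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

image = Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): the ImageNet-1k head
print(model.config.id2label[logits.argmax(-1).item()])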
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _SCREAMING_SNAKE_CASE( A__ ):
SCREAMING_SNAKE_CASE_ : str = '''unispeech-sat'''
def __init__( self ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-5 ,SCREAMING_SNAKE_CASE__="group" ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) ,SCREAMING_SNAKE_CASE__=(5, 2, 2, 2, 2, 2, 2) ,SCREAMING_SNAKE_CASE__=(10, 3, 3, 3, 3, 2, 2) ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=1_28 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=0.0_5 ,SCREAMING_SNAKE_CASE__=10 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=10 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=3_20 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=1_00 ,SCREAMING_SNAKE_CASE__=2_56 ,SCREAMING_SNAKE_CASE__=2_56 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__="mean" ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=2_56 ,SCREAMING_SNAKE_CASE__=(5_12, 5_12, 5_12, 5_12, 15_00) ,SCREAMING_SNAKE_CASE__=(5, 3, 3, 1, 1) ,SCREAMING_SNAKE_CASE__=(1, 2, 3, 1, 1) ,SCREAMING_SNAKE_CASE__=5_12 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=5_04 ,**SCREAMING_SNAKE_CASE__ ,) -> int:
"""simple docstring"""
super().__init__(**lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE :Optional[Any] = feat_extract_norm
__SCREAMING_SNAKE_CASE :Any = feat_extract_activation
__SCREAMING_SNAKE_CASE :Union[str, Any] = list(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :str = list(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :int = list(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :str = conv_bias
__SCREAMING_SNAKE_CASE :List[Any] = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE :Any = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE :Tuple = len(self.conv_dim )
__SCREAMING_SNAKE_CASE :Tuple = num_hidden_layers
__SCREAMING_SNAKE_CASE :Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE :Tuple = hidden_act
__SCREAMING_SNAKE_CASE :Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE :int = hidden_dropout
__SCREAMING_SNAKE_CASE :Optional[int] = attention_dropout
__SCREAMING_SNAKE_CASE :Dict = activation_dropout
__SCREAMING_SNAKE_CASE :Optional[Any] = feat_proj_dropout
__SCREAMING_SNAKE_CASE :str = final_dropout
__SCREAMING_SNAKE_CASE :List[Any] = layerdrop
__SCREAMING_SNAKE_CASE :Any = layer_norm_eps
__SCREAMING_SNAKE_CASE :str = initializer_range
__SCREAMING_SNAKE_CASE :Dict = vocab_size
__SCREAMING_SNAKE_CASE :Tuple = num_clusters
__SCREAMING_SNAKE_CASE :Dict = do_stable_layer_norm
__SCREAMING_SNAKE_CASE :Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE :List[str] = apply_spec_augment
__SCREAMING_SNAKE_CASE :Optional[Any] = mask_time_prob
__SCREAMING_SNAKE_CASE :str = mask_time_length
__SCREAMING_SNAKE_CASE :Optional[Any] = mask_time_min_masks
__SCREAMING_SNAKE_CASE :List[str] = mask_feature_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = mask_feature_length
__SCREAMING_SNAKE_CASE :str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__SCREAMING_SNAKE_CASE :List[str] = num_codevectors_per_group
__SCREAMING_SNAKE_CASE :int = num_codevector_groups
__SCREAMING_SNAKE_CASE :str = contrastive_logits_temperature
__SCREAMING_SNAKE_CASE :List[str] = feat_quantizer_dropout
__SCREAMING_SNAKE_CASE :int = num_negatives
__SCREAMING_SNAKE_CASE :Tuple = codevector_dim
__SCREAMING_SNAKE_CASE :Tuple = proj_codevector_dim
__SCREAMING_SNAKE_CASE :List[str] = diversity_loss_weight
# ctc loss
__SCREAMING_SNAKE_CASE :Dict = ctc_loss_reduction
__SCREAMING_SNAKE_CASE :Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE :List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE :Union[str, Any] = list(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Any = list(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :List[str] = list(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Any = xvector_output_dim
@property
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
        return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 498 |
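# A minimal sketch of what the property above computes: the product of the feature
# encoder's convolutional strides, i.e. how many raw waveform samples collapse into
# one encoder frame. The values below are the config defaults shown above.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> at 16 kHz, one hidden state per 20 ms of audio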
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def UpperCamelCase_ ( __a ) -> Union[str, Any]:
a__ : Tuple = R"\w+[.]\d+"
a__ : List[Any] = re.findall(__a , __a )
for pat in pats:
a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) )
return key
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
a__ : List[str] = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
a__ : Any = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
a__ : List[str] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
a__ : Tuple = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
a__ : Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase_ ( __a , __a , __a=42 ) -> str:
# Step 1: Convert pytorch tensor to numpy
a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) )
a__ : Optional[Any] = flatten_dict(__a )
a__ : Union[str, Any] = {}
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a__ : Optional[int] = rename_key(__a )
a__ : Optional[int] = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
a__ : str = jnp.asarray(__a )
return unflatten_dict(__a )
| 37 | 0 |
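# A small self-contained illustration of the two conventions the helpers above encode:
# PyTorch "layers.0"-style keys become "layers_0" in Flax, and 4-D conv kernels move
# from PyTorch's (out, in, kH, kW) layout to Flax's (kH, kW, in, out). The sample key
# is illustrative.
import re

import numpy as np

key = "down_blocks.0.attentions.1.proj.weight"
print(re.sub(r"(\w+)[.](\d+)", lambda m: f"{m.group(1)}_{m.group(2)}", key))
# -> down_blocks_0.attentions_1.proj.weight

pt_kernel = np.zeros((8, 3, 3, 3))             # PyTorch conv weight: OIHW
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # Flax conv kernel: HWIO
print(flax_kernel.shape)                       # (3, 3, 3, 8)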
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class a ( A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = CanineTokenizer
__lowerCAmelCase = False
def lowercase_ ( self ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase: str = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def lowercase_ ( self , **snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Dict = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
__UpperCAmelCase: Optional[int] = 1024
return tokenizer
@require_torch
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = self.canine_tokenizer
__UpperCAmelCase: Union[str, Any] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase: Dict = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase: str = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="""pt""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase: str = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = self.canine_tokenizer
__UpperCAmelCase: List[Any] = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase: Any = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowerCamelCase__ )
self.assertIn("""attention_mask""" , lowerCamelCase__ )
self.assertIn("""token_type_ids""" , lowerCamelCase__ )
@require_torch
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: int = self.canine_tokenizer
__UpperCAmelCase: List[Any] = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase: Dict = tokenizer(
text_target=lowerCamelCase__ , max_length=32 , padding="""max_length""" , truncation=lowerCamelCase__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase: List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase: Optional[Any] = tempfile.mkdtemp()
__UpperCAmelCase: int = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase: Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
__UpperCAmelCase: int = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
__UpperCAmelCase: Optional[int] = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
shutil.rmtree(lowerCamelCase__ )
__UpperCAmelCase: Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase: Tuple = tempfile.mkdtemp()
__UpperCAmelCase: int = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase: Any = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase: List[Any] = chr(0XE0_07 )
additional_special_tokens.append(lowerCamelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__UpperCAmelCase: str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
__UpperCAmelCase: Tuple = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
__UpperCAmelCase: List[Any] = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertIn(lowerCamelCase__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase: List[str] = tokenizer.__class__.from_pretrained(lowerCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase: Tuple = self.get_clean_sequence(lowerCamelCase__ )
# a special token for Canine can be defined as follows:
__UpperCAmelCase: List[Any] = 0XE0_05
__UpperCAmelCase: Optional[Any] = chr(lowerCamelCase__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__UpperCAmelCase: str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
__UpperCAmelCase: Optional[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCamelCase__ )
__UpperCAmelCase: Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCAmelCase: Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCAmelCase: Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , input_encoded + special_token_id )
__UpperCAmelCase: List[Any] = tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
self.assertTrue(special_token not in decoded )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase: List[Any] = chr(0XE0_05 )
__UpperCAmelCase: str = chr(0XE0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCamelCase__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase: Dict = tokenizer.tokenize(lowerCamelCase__ )
__UpperCAmelCase: int = tokenizer.tokenize(lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
self.assertEqual(token_a[0] , lowerCamelCase__ )
self.assertEqual(token_a[0] , lowerCamelCase__ )
@require_tokenizers
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase: List[Any] = 0XE0_06
__UpperCAmelCase: Union[str, Any] = chr(lowerCamelCase__ )
__UpperCAmelCase: Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCamelCase__ )
tokenizer.from_pretrained(lowerCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__UpperCAmelCase: Dict = json.load(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__UpperCAmelCase: Tuple = json.load(lowerCamelCase__ )
# a special token for Canine can be defined as follows:
__UpperCAmelCase: Optional[int] = 0XE0_06
__UpperCAmelCase: Tuple = chr(lowerCamelCase__ )
__UpperCAmelCase: List[Any] = [new_token_a]
__UpperCAmelCase: Optional[Any] = [new_token_a]
with open(os.path.join(lowerCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase: Any = tokenizer_class.from_pretrained(lowerCamelCase__ , extra_ids=0 )
self.assertIn(lowerCamelCase__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase: Optional[Any] = 0XE0_07
__UpperCAmelCase: str = chr(lowerCamelCase__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase: List[str] = [AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ )]
__UpperCAmelCase: List[str] = tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , extra_ids=0 )
self.assertIn(lowerCamelCase__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: int = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase: Optional[Any] = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase: Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase: str = input
__UpperCAmelCase: Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCAmelCase: Dict = tokenizer.decode(lowerCamelCase__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCamelCase__ , [output, output.lower()] )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase: int = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase: Optional[Any] = "a"
__UpperCAmelCase: Optional[Any] = ord(lowerCamelCase__ )
for attr in attributes_list:
setattr(lowerCamelCase__ , attr + """_id""" , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , attr + """_id""" ) , lowerCamelCase__ )
setattr(lowerCamelCase__ , attr + """_id""" , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , attr + """_id""" ) , lowerCamelCase__ )
setattr(lowerCamelCase__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCamelCase__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCamelCase__ , """additional_special_tokens_ids""" ) , [] )
__UpperCAmelCase: List[Any] = 0XE0_06
__UpperCAmelCase: Dict = chr(lowerCamelCase__ )
setattr(lowerCamelCase__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCamelCase__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCamelCase__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
        pass
| 523 |
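# A minimal sketch of the CANINE behaviour the tests above exercise: the tokenizer is
# character level, so token ids are Unicode code points, with [CLS]/[SEP] drawn from a
# private-use area (0xE000 = 57344 and 0xE001 = 57345 in the expected ids above).
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
encoded = tokenizer("Life is like a box of chocolates.")
print(encoded["input_ids"][:5])   # [57344, 76, 105, 102, 101] -> [CLS], 'L', 'i', 'f', 'e'
print([ord(c) for c in "Life"])   # [76, 105, 102, 101] - ids are just code points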
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase_ ( ) -> int:
a__ : Any = HfArgumentParser(__a )
a__ : Any = parser.parse_args_into_dataclasses()[0]
a__ : Optional[int] = TensorFlowBenchmark(args=__a )
try:
a__ : Optional[int] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a__ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead."
a__ : List[Any] = " ".join(str(__a ).split(" " )[:-1] )
a__ : str = ""
a__ : List[Any] = eval(str(__a ).split(" " )[-1] )
a__ : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__a )
if len(__a ) > 0:
a__ : Tuple = full_error_msg + begin_error_msg + str(__a )
raise ValueError(__a )
benchmark.run()
if __name__ == "__main__":
main()
| 37 | 0 |
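# A minimal sketch of driving the benchmark programmatically rather than through the
# CLI wrapper above. Requires TensorFlow; the model name, batch sizes and sequence
# lengths are illustrative.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
)
results = TensorFlowBenchmark(args=benchmark_args).run()
print(results)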
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : Tuple = """true"""
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=8_2 , UpperCamelCase__ : int=1_6 ):
"""simple docstring"""
set_seed(4_2 )
__UpperCAmelCase = RegressionModel()
__UpperCAmelCase = deepcopy(__a )
__UpperCAmelCase = RegressionDataset(length=__a )
__UpperCAmelCase = DataLoader(__a , batch_size=__a )
model.to(accelerator.device )
__UpperCAmelCase = accelerator.prepare(__a , __a )
return model, ddp_model, dataloader
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : int=False ):
"""simple docstring"""
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(UpperCamelCase__ : Union[str, Any] ):
__UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
with accelerator.main_process_first():
__UpperCAmelCase = dataset.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase__ : Tuple ):
if use_longest:
return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__a , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__a , shuffle=__a , collate_fn=__a , batch_size=1_6 )
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = Accelerator(dispatch_batches=__a , split_batches=__a )
__UpperCAmelCase = get_dataloader(__a , not dispatch_batches )
__UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__a )
__UpperCAmelCase = accelerator.prepare(__a , __a )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
for batch in dataloader:
__UpperCAmelCase = batch.values()
with torch.no_grad():
__UpperCAmelCase = model(__a )
__UpperCAmelCase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__UpperCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(__a )
targs.append(__a )
__UpperCAmelCase = torch.cat(__a ), torch.cat(__a )
return logits, targs
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple=8_2 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[str]=1_6 ):
"""simple docstring"""
__UpperCAmelCase = get_basic_setup(__a , __a , __a )
__UpperCAmelCase = generate_predictions(__a , __a , __a )
assert (
len(__a ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__a )}"""
def lowerCAmelCase ( UpperCamelCase__ : List[Any] = False , UpperCamelCase__ : Optional[Any] = False ):
"""simple docstring"""
__UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
__UpperCAmelCase = get_mrpc_setup(__a , __a )
# First do baseline
__UpperCAmelCase = setup["no"]
model.to(__a )
model.eval()
for batch in dataloader:
batch.to(__a )
with torch.inference_mode():
__UpperCAmelCase = model(**__a )
__UpperCAmelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__a , references=batch['''labels'''] )
__UpperCAmelCase = metric.compute()
# Then do distributed
__UpperCAmelCase = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
__UpperCAmelCase = model(**__a )
__UpperCAmelCase = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase = batch["labels"]
__UpperCAmelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__a , references=__a )
__UpperCAmelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
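# --- Illustrative sketch (added; not part of the original script) ---
# The core pattern exercised above, reduced to its essentials. All names here
# are hypothetical; this is a sketch of how `gather_for_metrics` is meant to be
# used, not a definitive API reference.
#
#     preds = outputs.logits.argmax(dim=-1)
#     refs = batch["labels"]
#     # Gathers tensors from every process and drops the samples that
#     # distributed samplers duplicate to pad the last batch, so the metric
#     # sees each evaluation example exactly once.
#     preds, refs = accelerator.gather_for_metrics((preds, refs))
#     metric.add_batch(predictions=preds, references=refs)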
| 262 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
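# --- Illustrative sketch (added; not part of the original script) ---
# `exact_match_score` and `f1_score` come from `utils_rag`; the definition below
# is an assumption about what the exact-match variant computes, shown only to
# make the scoring loop above easier to follow.
def _sketch_exact_match_score(prediction, ground_truth):
    # 1 when the normalized strings agree exactly, 0 otherwise.
    return int(prediction.strip().lower() == ground_truth.strip().lower())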
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
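# Worked example (added for clarity): with k=2, a hypothesis line "A\tB\tC"
# keeps its top-2 titles {A, B}; against a reference line "B\tD" the overlap is
# {B}, so this sample contributes len({B}) / 2 = 0.5 to the average.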
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith("\""):
            title = title[1:]
        if title.endswith("\""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 37 | 0 |
'''simple docstring'''
def is_palindrome(n) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    """Return `n` plus the number formed by reversing its digits."""
    return int(n) + int(str(n)[::-1])


def solution(limit=10_000) -> int:
    """Count the Lychrel candidates below `limit` (no palindrome within 50 reverse-and-add steps)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
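# Worked example (added for clarity): 47 is not a Lychrel candidate, since one
# reverse-and-add step gives 47 + 74 = 121, already a palindrome. 196, by
# contrast, never reaches a palindrome within the 50 iterations allowed above.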
if __name__ == "__main__":
print(f"{solution() = }")
| 208 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file, eval_file, test_file, tokenizer, label_column_id, max_seq_length=None, ):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="max_length", ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
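# --- Illustrative sketch (added; not part of the original script) ---
# The generator-based construction above, reduced to a toy case with a single
# hypothetical feature name:
#
#     def gen_toy():
#         yield ({"input_ids": [101, 2023, 102]}, 1)
#         yield ({"input_ids": [101, 2003, 102]}, 0)
#
#     toy_ds = tf.data.Dataset.from_generator(
#         gen_toy,
#         ({"input_ids": tf.int32}, tf.int64),
#         ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
#     )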
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir, )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
return results
if __name__ == "__main__":
main()
| 37 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir, save_dir, model_name, bs=8, max_source_length=1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs=None, prefix="", **generate_kwargs, ):
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, )

    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
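# --- Illustrative sketch (added; not part of the original script) ---
# `chunks` is imported from `utils`; a plausible definition is shown here only
# to clarify how the num_return_sequences > 1 branch regroups the flat list of
# decoded sequences (kept as a comment to avoid shadowing the real import):
#
#     def chunks(lst, n):
#         # chunks([1, 2, 3, 4], 2) -> [[1, 2], [3, 4]]
#         return [lst[i : i + n] for i in range(0, len(lst), n)]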
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate")
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3", )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch")
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all.")
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return")
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
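# Worked example (added for clarity): two ranks saving
# [{"pred": "b", "id": 1}] and [{"pred": "a", "id": 0}] combine to ["a", "b"],
# restoring dataset order no matter which rank finished first.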
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 423 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help="Predict \"\" if no-answer probability exceeds this (default = 1.0).", )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
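# Worked example (added for clarity): gold "a big dog" vs. prediction "big cat".
# Normalization drops the article "a", leaving ["big", "dog"] and ["big", "cat"];
# one shared token gives precision = recall = 1/2, hence F1 = 0.5.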
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
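# Intuition (added for clarity): starting from the score of predicting no-answer
# everywhere (num_no_ans correct), questions are flipped to "answered" in order
# of increasing no-answer probability; the threshold is placed where the running
# score peaks.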
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 37 | 0 |
from __future__ import annotations
import numpy as np
def relu(vector):
    # Apply the rectified linear unit element-wise: max(0, x).
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 53 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
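    # Walk-through (added for clarity): with merges ["l o", "lo w</w>", "e r</w>"],
    # "lower" starts as l o w e r</w>; "l o" -> "lo" and "e r</w>" -> "er</w>"
    # apply, while "lo w</w>" does not (this "w" is word-internal), leaving
    # ["lo", "w", "er</w>"] -- the first three expected tokens above.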
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085",  # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 37 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath):
        r"""Find instances where a non-binary file is opened without an explicit UTF-8 encoding."""
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath):
        r"""Find print statements outside of comments and docstrings."""
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
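# --- Illustrative sketch (added; not part of the original tests) ---
# How the encoding regex behaves on two sample lines (file names hypothetical):
#
#     pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
#     pattern.search('with open("data.txt") as f:')              # match: no encoding given
#     pattern.search('with open("data.txt", encoding="utf-8")')  # None: the lookahead rejects it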
| 130 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
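# --- Illustrative sketch (added; not part of the original command) ---
# Each (pattern, replacement) pair above is applied in order with `re.sub`, e.g.:
#
#     re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')", "text = tfds.features.Text()")
#     # -> "text = datasets.Value('string')"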
def convert_command_factory(args):
    """Factory used to instantiate the ConvertCommand from provided command line arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path, datasets_directory, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")

            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 37 | 0 |
"""simple docstring"""
import math
def res(x, y):
    """Compare powers via logarithms: for positive x, log10(x^y) = y * log10(x)."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
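# Worked example (added for clarity): res(2, 10) = 10 * log10(2) ~= 3.0103 and
# res(3, 7) = 7 * log10(3) ~= 3.3398, so 3^7 (= 2187) beats 2^10 (= 1024).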
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 644 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__(self, fo="", target_protocol=None, target_options=None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode="rb", protocol=target_protocol, compression=self.compression, client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path):
        return self.file.open().read()

    def _open(self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = 'bz2'
    compression = 'bz2'
    extension = '.bz2'


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = 'gzip'
    compression = 'gzip'
    extension = '.gz'


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = 'lz4'
    compression = 'lz4'
    extension = '.lz4'


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an .xz (LZMA) file as a filesystem with one file inside."""

    protocol = 'xz'
    compression = 'xz'
    extension = '.xz'


class ZstdCompressedFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = 'zstd'
    compression = 'zstd'
    extension = '.zst'
def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ):
super().__init__(
fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
a__ : Any = self.file.__enter__
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : str ):
a__ : List[Any] = file_
def __enter__( self : str ):
self._file.__enter__()
return self
def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ):
self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ )
def __iter__( self : List[str] ):
return iter(self._file )
def _UpperCamelCase( self : Any ):
return next(self._file )
def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ):
return getattr(self._file , lowerCamelCase__ )
def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ):
return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) )
a__ : Any = fixed_enter
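

# Usage sketch (added; not part of the original module). Assuming the gzip
# subclass above is registered with fsspec under its "gzip" protocol -- as the
# protocol comment above illustrates (gzip://file.txt::http://foo.bar/file.txt.gz)
# and as the upstream `datasets` library does via fsspec.register_implementation --
# a compressed file could be read with (paths are hypothetical):
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::file.txt.gz", "rb") as f:
#       data = f.read()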
| 37 | 0 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    # out of bounds, already visited, or blocked: no path through this cell
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # reached the bottom-right corner: one complete path found
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
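
    # Usage sketch (added for illustration): count simple paths from the
    # top-left to the bottom-right corner; 1 marks a blocked cell. For this
    # 3x3 grid, the ring around the blocked center gives exactly 2 paths.
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # expected: 2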
| 506 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
a__ : Union[str, Any] = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__a ):
os.makedirs(__a )
a__ : Any = model.state_dict()
def to_tf_var_name(__a ):
for patt, repl in iter(__a ):
a__ : Tuple = name.replace(__a , __a )
return f'''bert/{name}'''
def create_tf_var(__a , __a , __a ):
a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype )
a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__a )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
a__ : int = to_tf_var_name(__a )
a__ : Union[str, Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
a__ : int = torch_tensor.T
a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a )
tf.keras.backend.set_value(__a , __a )
a__ : int = session.run(__a )
print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' )
a__ : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) )
def UpperCamelCase_ ( __a=None ) -> int:
a__ : Dict = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" )
a__ : Optional[Any] = parser.parse_args(__a )
a__ : Tuple = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
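
# Usage sketch (added): a hypothetical command line for the converter above;
# the script filename is illustrative, the flags come from the argparse setup.
#   python convert_bert_pytorch_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_model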
| 37 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A ( A__ , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = DanceDiffusionPipeline
__UpperCAmelCase : List[str] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__UpperCAmelCase : Dict = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__UpperCAmelCase : Union[str, Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Any = False
def __lowerCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_a = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCamelCase__ , use_timestep_embedding=lowerCamelCase__ , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
_a = IPNDMScheduler()
_a = {
"unet": unet,
"scheduler": scheduler,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Optional[int]:
if str(lowerCamelCase__ ).startswith("mps" ):
_a = torch.manual_seed(lowerCamelCase__ )
else:
_a = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_a = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def __lowerCAmelCase ( self ) -> Dict:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = DanceDiffusionPipeline(**lowerCamelCase__ )
_a = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_a = self.get_dummy_inputs(lowerCamelCase__ )
_a = pipe(**lowerCamelCase__ )
_a = output.audios
_a = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_a = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCAmelCase ( self ) -> Dict:
return super().test_save_load_local()
@skip_mps
def __lowerCAmelCase ( self ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCAmelCase ( self ) -> List[str]:
return super().test_save_load_optional_components()
@skip_mps
def __lowerCAmelCase ( self ) -> List[Any]:
return super().test_attention_slicing_forward_pass()
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = torch_device
_a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
_a = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_a = torch.manual_seed(0 )
_a = pipe(generator=lowerCamelCase__ , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
_a = output.audios
_a = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_a = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Any:
_a = torch_device
_a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
_a = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_a = torch.manual_seed(0 )
_a = pipe(generator=lowerCamelCase__ , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
_a = output.audios
_a = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_a = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
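
# Usage sketch (added): the integration tests above are gated behind @slow and
# @require_torch_gpu; with the slow-test flag enabled they could be run as
# (the test file path is illustrative):
#   RUN_SLOW=1 pytest tests/pipelines/dance_diffusion/test_dance_diffusion.py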
| 131 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ):
a__ : str = parent
a__ : Any = batch_size
a__ : Dict = patch_size
a__ : List[Any] = max_length
a__ : str = num_mel_bins
a__ : Optional[Any] = is_training
a__ : Optional[int] = use_labels
a__ : List[Any] = hidden_size
a__ : str = num_hidden_layers
a__ : Any = num_attention_heads
a__ : Union[str, Any] = intermediate_size
a__ : List[str] = hidden_act
a__ : str = hidden_dropout_prob
a__ : Tuple = attention_probs_dropout_prob
a__ : List[Any] = type_sequence_label_size
a__ : Any = initializer_range
a__ : str = scope
a__ : List[str] = frequency_stride
a__ : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
a__ : Tuple = frequency_out_dimension * time_out_dimension
a__ : List[str] = num_patches + 2
def _UpperCamelCase( self : List[str] ):
a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
a__ : List[Any] = None
if self.use_labels:
a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : List[str] = self.get_config()
return config, input_values, labels
def _UpperCamelCase( self : Optional[int] ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] ):
a__ : List[Any] = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase( self : str ):
a__ : Dict = self.prepare_config_and_inputs()
        a__, a__, a__ : Optional[int] = config_and_inputs
a__ : List[Any] = {"input_values": input_values}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_lowercase = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _UpperCamelCase( self : str ):
a__ : str = ASTModelTester(self )
a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _UpperCamelCase( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _UpperCamelCase( self : List[str] ):
pass
def _UpperCamelCase( self : Optional[int] ):
a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Any = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def _UpperCamelCase( self : Tuple ):
a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(lowerCamelCase__ )
a__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Optional[Any] = ["input_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCamelCase_ ( ) -> Any:
a__ : Optional[int] = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
a__, a__ : List[str] = torchaudio.load(__a )
return audio, sampling_rate
@require_torch
@require_torchaudio
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : List[str] ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _UpperCamelCase( self : Optional[int] ):
a__ : int = self.default_feature_extractor
a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(lowerCamelCase__ )
a__ : Any = self.default_feature_extractor
a__, a__ : Dict = prepare_audio()
a__ : str = audio.squeeze().numpy()
a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Any = model(**lowerCamelCase__ )
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
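
# Usage sketch (added): minimal AST audio-classification inference mirroring
# the integration test above (same checkpoint; random audio, for illustration):
#
#   import torch
#   from transformers import ASTFeatureExtractor, ASTForAudioClassification
#   extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   inputs = extractor(torch.randn(16_000).numpy(), sampling_rate=16_000, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 527), as checked above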
| 37 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class a__ :
def __init__( self : Any , UpperCamelCase_ : list[str]):
"""simple docstring"""
__UpperCAmelCase : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []})
for keyword in keywords:
self.add_keyword(lowerCamelCase__)
self.set_fail_transitions()
def a_ ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : str):
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def a_ ( self : Any , UpperCamelCase_ : str):
"""simple docstring"""
__UpperCAmelCase : List[str] = 0
for character in keyword:
__UpperCAmelCase : Tuple = self.find_next_state(lowerCamelCase__ , lowerCamelCase__)
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
})
self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
__UpperCAmelCase : Union[str, Any] = len(self.adlist) - 1
else:
__UpperCAmelCase : List[str] = next_state
self.adlist[current_state]["output"].append(lowerCamelCase__)
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCamelCase__)
__UpperCAmelCase : Tuple = 0
while q:
__UpperCAmelCase : str = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCamelCase__)
__UpperCAmelCase : Tuple = self.adlist[r]["fail_state"]
while (
self.find_next_state(lowerCamelCase__ , self.adlist[child]["value"]) is None
and state != 0
):
__UpperCAmelCase : List[Any] = self.adlist[state]["fail_state"]
__UpperCAmelCase : Optional[int] = self.find_next_state(
lowerCamelCase__ , self.adlist[child]["value"])
if self.adlist[child]["fail_state"] is None:
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Union[str, Any] = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def a_ ( self : Optional[Any] , UpperCamelCase_ : str):
"""simple docstring"""
__UpperCAmelCase : dict = {} # returns a dict with keywords and list of its occurrences
__UpperCAmelCase : Tuple = 0
for i in range(len(lowerCamelCase__)):
while (
self.find_next_state(lowerCamelCase__ , string[i]) is None
and current_state != 0
):
__UpperCAmelCase : Union[str, Any] = self.adlist[current_state]["fail_state"]
__UpperCAmelCase : Optional[Any] = self.find_next_state(lowerCamelCase__ , string[i])
if next_state is None:
__UpperCAmelCase : str = 0
else:
__UpperCAmelCase : int = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__UpperCAmelCase : Optional[Any] = []
result[key].append(i - len(lowerCamelCase__) + 1)
return result
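

# Usage sketch (added; the class and method identifiers in this snippet are
# mangled, so the names below are assumptions): build the automaton for a few
# keywords, then search a string. The returned dict maps each matched keyword
# to the indices where it starts, e.g.:
#
#   automaton = Automaton(["he", "she", "hers"])
#   automaton.search_in("ahishers")  # -> {"she": [3], "he": [4], "hers": [4]}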
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = XGLMTokenizer
_lowercase = XGLMTokenizerFast
_lowercase = True
_lowercase = True
def _UpperCamelCase( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase( self : List[Any] ):
a__ : int = "<pad>"
a__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(lowerCamelCase__ ) , 1_008 )
def _UpperCamelCase( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def _UpperCamelCase( self : Optional[int] ):
a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
a__ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _UpperCamelCase( self : Dict ):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def _UpperCamelCase( self : Union[str, Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase__ , f.name )
a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ )
a__ : List[str] = pickle.dumps(lowerCamelCase__ )
pickle.loads(lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
if not self.test_rust_tokenizer:
return
a__ : Any = self.get_tokenizer()
a__ : Optional[Any] = self.get_rust_tokenizer()
a__ : Tuple = "I was born in 92000, and this is falsé."
a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ )
a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : Tuple = tokenizer.encode(lowerCamelCase__ )
a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = "Hello World!"
a__ : List[str] = [2, 31_227, 4_447, 35]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _UpperCamelCase( self : Union[str, Any] ):
a__ : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _UpperCamelCase( self : List[Any] ):
# fmt: off
a__ : Optional[int] = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
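
# Usage sketch (added): basic XGLM tokenization, mirroring the slow test above.
#   from transformers import XGLMTokenizer
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   tokenizer.encode("Hello World!")  # -> [2, 31227, 4447, 35], per the test above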
| 37 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class _SCREAMING_SNAKE_CASE:
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=13 ,SCREAMING_SNAKE_CASE__=7 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=99 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=37 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=5_12 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=10_00 ,) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = parent
__SCREAMING_SNAKE_CASE :Optional[Any] = batch_size
__SCREAMING_SNAKE_CASE :Optional[int] = seq_length
__SCREAMING_SNAKE_CASE :Any = is_training
__SCREAMING_SNAKE_CASE :Tuple = use_input_mask
__SCREAMING_SNAKE_CASE :str = use_token_type_ids
__SCREAMING_SNAKE_CASE :int = use_labels
__SCREAMING_SNAKE_CASE :int = vocab_size
__SCREAMING_SNAKE_CASE :int = hidden_size
__SCREAMING_SNAKE_CASE :List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE :Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE :Any = intermediate_size
__SCREAMING_SNAKE_CASE :Tuple = hidden_act
__SCREAMING_SNAKE_CASE :List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Dict = max_position_embeddings
__SCREAMING_SNAKE_CASE :int = type_vocab_size
__SCREAMING_SNAKE_CASE :str = type_sequence_label_size
__SCREAMING_SNAKE_CASE :List[str] = initializer_range
__SCREAMING_SNAKE_CASE :List[str] = num_labels
__SCREAMING_SNAKE_CASE :Any = num_choices
__SCREAMING_SNAKE_CASE :Union[str, Any] = scope
__SCREAMING_SNAKE_CASE :Dict = range_bbox
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__SCREAMING_SNAKE_CASE :Any = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__SCREAMING_SNAKE_CASE :int = bbox[i, j, 3]
__SCREAMING_SNAKE_CASE :str = bbox[i, j, 1]
__SCREAMING_SNAKE_CASE :int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = bbox[i, j, 2]
__SCREAMING_SNAKE_CASE :int = bbox[i, j, 0]
__SCREAMING_SNAKE_CASE :Dict = t
__SCREAMING_SNAKE_CASE :List[str] = tf.convert_to_tensor(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE :List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE :List[Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE :Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__SCREAMING_SNAKE_CASE :str = None
__SCREAMING_SNAKE_CASE :str = None
__SCREAMING_SNAKE_CASE :int = None
if self.use_labels:
__SCREAMING_SNAKE_CASE :Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE :int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__SCREAMING_SNAKE_CASE :str = ids_tensor([self.batch_size] ,self.num_choices )
__SCREAMING_SNAKE_CASE :Tuple = LayoutLMConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = TFLayoutLMModel(config=lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Any = model(lowerCamelCase__ ,lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :List[Any] = model(lowerCamelCase__ ,lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Tuple = model(lowerCamelCase__ ,lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = TFLayoutLMForMaskedLM(config=lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = model(lowerCamelCase__ ,lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = self.num_labels
__SCREAMING_SNAKE_CASE :Optional[Any] = TFLayoutLMForSequenceClassification(config=lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :int = model(lowerCamelCase__ ,lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = self.num_labels
__SCREAMING_SNAKE_CASE :Any = TFLayoutLMForTokenClassification(config=lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :int = model(lowerCamelCase__ ,lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = TFLayoutLMForQuestionAnswering(config=lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :List[str] = model(lowerCamelCase__ ,lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE :List[Any] = config_and_inputs
__SCREAMING_SNAKE_CASE :Optional[int] = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE( A__ , A__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : str = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = 10
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = TFLayoutLMModelTester(self )
__SCREAMING_SNAKE_CASE :Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase__ ,hidden_size=37 )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :str = TFLayoutLMModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def __lowerCamelCase ( ) -> List[Any]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__SCREAMING_SNAKE_CASE :Optional[Any] = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
__SCREAMING_SNAKE_CASE :Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__SCREAMING_SNAKE_CASE :Tuple = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
__SCREAMING_SNAKE_CASE :Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__SCREAMING_SNAKE_CASE :List[str] = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
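
# Usage sketch (added; the helper's def name is mangled above, but the
# integration tests below call it as prepare_layoutlm_batch_inputs):
#   model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
#   input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
#   outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask,
#                   token_type_ids=token_type_ids)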
@require_tf
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__SCREAMING_SNAKE_CASE :Tuple = prepare_layoutlm_batch_inputs()
# forward pass
__SCREAMING_SNAKE_CASE :Optional[int] = model(input_ids=lowerCamelCase__ ,bbox=lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
# test the sequence output on [0, :3, :3]
__SCREAMING_SNAKE_CASE :Dict = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] ,)
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,lowerCamelCase__ ,atol=1E-3 ) )
# test the pooled output on [1, :3]
__SCREAMING_SNAKE_CASE :Tuple = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] ,lowerCamelCase__ ,atol=1E-3 ) )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' ,num_labels=2 )
__SCREAMING_SNAKE_CASE :str = prepare_layoutlm_batch_inputs()
# forward pass
__SCREAMING_SNAKE_CASE :Optional[Any] = model(
input_ids=lowerCamelCase__ ,bbox=lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=tf.convert_to_tensor([1, 1] ) ,)
# test whether we get a loss as a scalar
__SCREAMING_SNAKE_CASE :Union[str, Any] = outputs.loss
__SCREAMING_SNAKE_CASE :Tuple = (2,)
self.assertEqual(loss.shape ,lowerCamelCase__ )
# test the shape of the logits
__SCREAMING_SNAKE_CASE :Dict = outputs.logits
__SCREAMING_SNAKE_CASE :Any = (2, 2)
self.assertEqual(logits.shape ,lowerCamelCase__ )
@slow
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' ,num_labels=13 )
__SCREAMING_SNAKE_CASE :Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
__SCREAMING_SNAKE_CASE :Optional[Any] = model(
input_ids=lowerCamelCase__ ,bbox=lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
# test the shape of the logits
__SCREAMING_SNAKE_CASE :Any = outputs.logits
__SCREAMING_SNAKE_CASE :List[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape ,lowerCamelCase__ )
@slow
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = prepare_layoutlm_batch_inputs()
# forward pass
__SCREAMING_SNAKE_CASE :List[str] = model(input_ids=lowerCamelCase__ ,bbox=lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
# test the shape of the logits
__SCREAMING_SNAKE_CASE :Dict = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape ,lowerCamelCase__ )
        self.assertEqual(outputs.end_logits.shape ,lowerCamelCase__ )
| 498 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCamelCase_ ( ) -> int:
a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" )
return image
def UpperCamelCase_ ( __a ) -> Optional[Any]:
a__ : Any = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
a__ : Union[str, Any] = dct.pop(__a )
a__ : List[str] = val
def UpperCamelCase_ ( __a , __a ) -> Optional[Any]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) )
a__ : int = qkv_bias
def UpperCamelCase_ ( __a ) -> Dict:
a__ : Tuple = 364 if "coco" in model_name else 224
a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict()
elif "vicuna-13b" in model_name:
a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict()
else:
raise ValueError("Model name not supported" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a )
return config, image_size
@torch.no_grad()
def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int:
a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
a__, a__ : List[str] = get_blipa_config(__a )
a__ : Any = InstructBlipForConditionalGeneration(__a ).eval()
a__ : Dict = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
a__, a__ : Dict = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu"
a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu"
a__, a__, a__ : Tuple = load_model_and_preprocess(
name=__a , model_type=__a , is_eval=__a , device=__a )
original_model.eval()
print("Done!" )
# update state dict keys
a__ : Dict = original_model.state_dict()
a__ : Optional[int] = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a__ : Optional[int] = state_dict.pop(__a )
if key.startswith("Qformer.bert" ):
a__ : List[Any] = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
a__ : Any = key.replace("self" , "attention" )
if "llm_proj" in key:
a__ : Dict = key.replace("llm_proj" , "language_projection" )
if "t5_proj" in key:
a__ : int = key.replace("t5_proj" , "language_projection" )
if key.startswith("llm_model" ):
a__ : List[str] = key.replace("llm_model" , "language_model" )
if key.startswith("t5" ):
a__ : str = key.replace("t5" , "language" )
a__ : Dict = val
# read in qv biases
read_in_q_v_bias(__a , __a )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__a , strict=__a )
a__ : Union[str, Any] = load_demo_image()
a__ : int = "What is unusual about this image?"
# create processor
a__ : Any = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a )
a__ : Tuple = InstructBlipProcessor(
image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , )
a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a )
# make sure processor creates exact same pixel values
a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a )
a__ : Optional[Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a )
original_model.to(__a )
hf_model.to(__a )
with torch.no_grad():
if "vicuna" in model_name:
a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
a__ : List[str] = hf_model(**__a ).logits
else:
a__ : List[Any] = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a )
a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
a__ : Any = hf_model(**__a , labels=__a ).logits
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a )
print("Looks ok!" )
print("Generating with original model..." )
a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
a__ : int = hf_model.generate(
**__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
a__ : int = 2
print("Original generation:" , __a )
a__ : str = processor.batch_decode(__a , skip_special_tokens=__a )
a__ : str = [text.strip() for text in output_text]
print("HF generation:" , __a )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__a )
hf_model.save_pretrained(__a )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
UpperCamelCase : Optional[int] = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase : Dict = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
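
# Usage sketch (added): a hypothetical invocation of the conversion entry point
# above (script filename is illustrative; flags match the argparse setup):
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub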
| 37 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a ( A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = """ssube/stable-diffusion-x4-upscaler-onnx"""
def lowercase_ ( self , snake_case_=0 ):
'''simple docstring'''
__UpperCAmelCase: List[str] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) )
__UpperCAmelCase: Optional[int] = torch.manual_seed(lowerCamelCase__ )
__UpperCAmelCase: Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase: Optional[Any] = self.get_dummy_inputs()
__UpperCAmelCase: Any = pipe(**lowerCamelCase__ ).images
__UpperCAmelCase: Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase: Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase: List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase: List[Any] = self.get_dummy_inputs()
__UpperCAmelCase: Optional[Any] = pipe(**lowerCamelCase__ ).images
__UpperCAmelCase: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase: Tuple = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase: List[Any] = self.get_dummy_inputs()
__UpperCAmelCase: Dict = pipe(**lowerCamelCase__ ).images
__UpperCAmelCase: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase: Optional[int] = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase: Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase: Any = self.get_dummy_inputs()
__UpperCAmelCase: List[Any] = pipe(**lowerCamelCase__ ).images
__UpperCAmelCase: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase: Optional[int] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase: Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase: Optional[int] = self.get_dummy_inputs()
__UpperCAmelCase: int = pipe(**lowerCamelCase__ ).images
__UpperCAmelCase: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase: Optional[int] = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
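# --- Hedged usage sketch (added): running the upscaler exercised by the tests
# above outside a test harness; the checkpoint name and image URL are the ones
# the tests already use, everything else mirrors their inputs.
#
# from diffusers import OnnxStableDiffusionUpscalePipeline
# from diffusers.utils import load_image
#
# pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#     "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
# )
# low_res = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images"
#     "/resolve/main/img2img/sketch-mountains-input.jpg"
# ).resize((128, 128))
# result = pipe(prompt="A fantasy landscape, trending on artstation", image=low_res)
# result.images[0]  # 512x512 output, i.e. a 4x upscale in each dimension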
@nightly
@require_onnxruntime
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = ort.SessionOptions()
__UpperCAmelCase: Tuple = False
return options
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase: int = init_image.resize((128, 128) )
# using the PNDM scheduler by default
__UpperCAmelCase: Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase: Any = "A fantasy landscape, trending on artstation"
__UpperCAmelCase: Dict = torch.manual_seed(0 )
__UpperCAmelCase: Tuple = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type="""np""" , )
__UpperCAmelCase: Any = output.images
__UpperCAmelCase: Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__UpperCAmelCase: List[str] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase: Optional[Any] = init_image.resize((128, 128) )
__UpperCAmelCase: Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
__UpperCAmelCase: Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase: Optional[int] = "A fantasy landscape, trending on artstation"
__UpperCAmelCase: Optional[int] = torch.manual_seed(0 )
__UpperCAmelCase: Tuple = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type="""np""" , )
__UpperCAmelCase: Dict = output.images
__UpperCAmelCase: Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__UpperCAmelCase: Tuple = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 523 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
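# --- Added sanity check (assumes Python 3.8+ for math.comb): the single-row DP
# above must agree with the closed-form binomial coefficient.
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252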
| 37 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=DummyObject ):
    _backends = ['''transformers''', '''torch''', '''note_seq''']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 262 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
class A__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = LEDTokenizer
_lowercase = ['input_ids', 'attention_mask']
def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) )
a__ : Optional[Any] = add_prefix_space
a__ : List[str] = pre_tok_class(**lowerCamelCase__ )
a__ : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a__ : Any = "post_processor"
a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
a__ : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a__ : Optional[Any] = tuple(state["sep"] )
if "cls" in state:
a__ : Optional[Any] = tuple(state["cls"] )
a__ : Optional[int] = False
if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : Dict = add_prefix_space
a__ : int = True
if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets:
a__ : List[Any] = trim_offsets
a__ : List[str] = True
if changes_to_apply:
a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) )
a__ : int = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCamelCase( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ):
a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
a__ : Union[str, Any] = value
def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ):
a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ):
a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ):
a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
a__ : List[str] = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ):
a__ : str = super()._pad(
encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
a__ : Optional[int] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a__ : Tuple = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ )
if needs_to_be_padded:
a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a__ : List[Any] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
a__ : Any = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
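# --- Hedged usage sketch (added): the custom _pad above keeps global_attention_mask
# aligned with input_ids, padding it with -1 ("local attention"); the checkpoint
# name is the one in the maps above, the rest is an assumed minimal example.
#
# import torch
# from transformers import LEDTokenizerFast
#
# tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
# enc = tok(["a very long document ..."], return_tensors="pt", padding=True)
# enc["global_attention_mask"] = torch.zeros_like(enc["input_ids"])
# enc["global_attention_mask"][:, 0] = 1  # global attention on the first token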
| 37 | 0 |
'''simple docstring'''
def perfect ( number : int ) -> bool:
    """simple docstring"""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 208 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase : List[str] = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class A__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ['input_ids', 'attention_mask']
_lowercase = RobertaTokenizer
def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) )
a__ : int = add_prefix_space
a__ : Tuple = pre_tok_class(**lowerCamelCase__ )
a__ : str = add_prefix_space
a__ : Tuple = "post_processor"
a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a__ : Tuple = tuple(state["sep"] )
if "cls" in state:
a__ : str = tuple(state["cls"] )
a__ : str = False
if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : str = add_prefix_space
a__ : Any = True
if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets:
a__ : int = trim_offsets
a__ : Dict = True
if changes_to_apply:
a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) )
a__ : str = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
def _UpperCamelCase( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ):
a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
a__ : List[str] = value
def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ):
a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ):
a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ):
a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
a__ : Tuple = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
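# --- Hedged usage sketch (added): the pair-input layout produced by the
# special-token methods above is <s> A </s> </s> B </s>, and the token type ids
# are all zeros because RoBERTa does not use them.
#
# from transformers import RobertaTokenizerFast
#
# tok = RobertaTokenizerFast.from_pretrained("roberta-base")
# ids = tok("hello", "world")["input_ids"]
# # ids == [0, ..., 2, 2, ..., 2]  (0 = <s>, 2 = </s>)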
| 37 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf( model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
lowerCamelCase_: List[str] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowerCamelCase_: Optional[Any] = cached_file(__a , __a , force_download=not use_cached_models )
lowerCamelCase_: Tuple = config_class.from_json_file(__a )
lowerCamelCase_: List[Any] = True
lowerCamelCase_: Optional[int] = True
print(f"""Building TensorFlow model from configuration: {config}""" )
lowerCamelCase_: List[str] = model_class(__a )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowerCamelCase_: int = cached_file(
__a , __a , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowerCamelCase_: List[str] = load_pytorch_checkpoint_in_tfa_model(__a , __a )
if compare_with_pt_model:
lowerCamelCase_: Optional[int] = tf_model(tf_model.dummy_inputs , training=__a ) # build the network
lowerCamelCase_: Union[str, Any] = torch.load(__a , map_location="""cpu""" )
lowerCamelCase_: Optional[Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__a , config=__a , state_dict=__a )
with torch.no_grad():
lowerCamelCase_: Dict = pt_model(**pt_model.dummy_inputs )
lowerCamelCase_: str = pto[0].numpy()
lowerCamelCase_: Union[str, Any] = tfo[0].numpy()
lowerCamelCase_: Any = np.amax(np.abs(np_pt - np_tf ) )
print(f"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2E-2, f"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(f"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(__a , save_format="""h5""" )
def convert_all_pt_checkpoints_to_tf( args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
if args_model_type is None:
lowerCamelCase_: str = list(MODEL_CLASSES.keys() )
else:
lowerCamelCase_: int = [args_model_type]
for j, model_type in enumerate(__a , start=1 ):
print("""=""" * 1_0_0 )
print(f""" Converting model type {j}/{len(__a )}: {model_type}""" )
print("""=""" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
lowerCamelCase_: List[Any] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowerCamelCase_: int = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowerCamelCase_: Optional[Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__a , __a ) , start=1 ):
print("""-""" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
lowerCamelCase_: Any = model_shortcut_name
elif only_convert_finetuned_models:
print(f""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
f""" Converting checkpoint {i}/{len(__a )}: {model_shortcut_name} - model_type {model_type}""" )
print("""-""" * 1_0_0 )
if config_shortcut_name in aws_config_map:
lowerCamelCase_: Dict = cached_file(__a , __a , force_download=not use_cached_models )
else:
lowerCamelCase_: str = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowerCamelCase_: List[str] = cached_file(__a , __a , force_download=not use_cached_models )
else:
lowerCamelCase_: Optional[Any] = model_shortcut_name
if os.path.isfile(__a ):
lowerCamelCase_: int = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=__a , pytorch_checkpoint_path=__a , config_file=__a , tf_dump_path=os.path.join(__a , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__a , )
if remove_cached_files:
os.remove(__a )
os.remove(__a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
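# --- Hedged usage sketch (added): converting a single checkpoint; the flag names
# come from the parser above, the script filename and paths are assumptions.
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased \
#       --tf_dump_path ./tf_models \
#       --compare_with_pt_model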
| 423 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
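# --- Added sanity checks: min-max scaling pins the extremes of the sample to 0
# and 1, and the sample z-scores of the symmetric triple [2, 4, 6] (mean 4,
# sample stdev 2) are -1, 0 and 1.
assert normalization([2, 4, 6]) == [0.0, 0.5, 1.0]
assert standardization([2, 4, 6]) == [-1.0, 0.0, 1.0]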
| 37 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_snake_case : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_snake_case : Dict = {
"""allenai/led-base-16384""": 16384,
}
class _UpperCAmelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = LEDTokenizer
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int="replace" , lowerCAmelCase_ : Union[str, Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : Tuple="</s>" , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : str="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Any="<mask>" , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : int=True , **lowerCAmelCase_ : Union[str, Any] , ) -> List[Any]:
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space:
__lowerCAmelCase = getattr(lowerCamelCase__ , pre_tok_state.pop('type' ) )
__lowerCAmelCase = add_prefix_space
__lowerCAmelCase = pre_tok_class(**lowerCamelCase__ )
__lowerCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCAmelCase = "post_processor"
__lowerCAmelCase = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
__lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
__lowerCAmelCase = tuple(state['cls'] )
__lowerCAmelCase = False
if state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space:
__lowerCAmelCase = add_prefix_space
__lowerCAmelCase = True
if state.get('trim_offsets' , lowerCamelCase__ ) != trim_offsets:
__lowerCAmelCase = trim_offsets
__lowerCAmelCase = True
if changes_to_apply:
__lowerCAmelCase = getattr(lowerCamelCase__ , state.pop('type' ) )
__lowerCAmelCase = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase ( self : Union[str, Any] ) -> Dict:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
__lowerCAmelCase = value
def lowercase ( self : Any , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = kwargs.get('is_split_into_words' , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def lowercase ( self : Any , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = kwargs.get('is_split_into_words' , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def lowercase ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> int:
__lowerCAmelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def lowercase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any]=None ) -> Dict:
__lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[Any]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : Dict , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ) -> Optional[int]:
__lowerCAmelCase = super()._pad(
encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__lowerCAmelCase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowerCAmelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowerCAmelCase = len(encoded_inputs['global_attention_mask'] ) != len(lowerCamelCase__ )
if needs_to_be_padded:
__lowerCAmelCase = len(lowerCamelCase__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowerCAmelCase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowerCAmelCase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 53 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
__A : str = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
__A : Any = {
"""facebook/bart-base""": 1_024,
"""facebook/bart-large""": 1_024,
"""facebook/bart-large-mnli""": 1_024,
"""facebook/bart-large-cnn""": 1_024,
"""facebook/bart-large-xsum""": 1_024,
"""yjernite/bart_eli5""": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    '''Build the GPT-2 byte-to-unicode table: every byte maps to a printable character.'''
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    '''Return the set of adjacent symbol pairs within a word (a tuple of symbols).'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class A_ (PreTrainedTokenizer ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ):
'''simple docstring'''
UpperCAmelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
UpperCAmelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
UpperCAmelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
UpperCAmelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
UpperCAmelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
UpperCAmelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase = json.load(lowerCamelCase__ )
UpperCAmelCase = {v: k for k, v in self.encoder.items()}
UpperCAmelCase = errors # how to handle errors in decoding
UpperCAmelCase = bytes_to_unicode()
UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding='''utf-8''' ) as merges_handle:
UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
UpperCAmelCase = {}
UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _lowercase ( self ):
'''simple docstring'''
return len(self.encoder )
def _lowercase ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self , _A ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase = tuple(lowerCamelCase__ )
UpperCAmelCase = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
UpperCAmelCase = min(lowerCamelCase__ , key=lambda _A : self.bpe_ranks.get(lowerCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase = bigram
UpperCAmelCase = []
UpperCAmelCase = 0
while i < len(lowerCamelCase__ ):
try:
UpperCAmelCase = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase = tuple(lowerCamelCase__ )
UpperCAmelCase = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
UpperCAmelCase = get_pairs(lowerCamelCase__ )
UpperCAmelCase = " ".join(lowerCamelCase__ )
UpperCAmelCase = word
return word
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = []
for token in re.findall(self.pat , lowerCamelCase__ ):
UpperCAmelCase = "".join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def _lowercase ( self , _A ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def _lowercase ( self , _A ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase__ )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = "".join(lowerCamelCase__ )
UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + '''\n''' )
UpperCAmelCase = 0
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
UpperCAmelCase = token_index
writer.write(''' '''.join(lowerCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self , _A , _A = None , _A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , _A , _A=False , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
UpperCAmelCase = " " + text
return (text, kwargs)
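# --- Added illustration: the byte-to-unicode table built by bytes_to_unicode()
# assigns every byte a printable character, which is what lets this byte-level
# BPE tokenizer handle arbitrary UTF-8 input without unknown tokens.
assert "".join(bytes_to_unicode()[b] for b in "héllo".encode("utf-8")) == "hÃ©llo"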
| 130 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 100 , s_noise: float = 1.007 , s_churn: float = 80 , s_min: float = 0.05 , s_max: float = 50 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ):
        return sample

    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )

    def add_noise_to_input( self , sample: torch.FloatTensor , sigma: float , generator: Optional[torch.Generator] = None ):
        # Temporarily raise the noise level: sigma_hat = sigma + gamma * sigma.
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , return_dict: bool = True , ):
        # First-order (Euler) step from sigma_hat down to sigma_prev.
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , sample_prev: torch.FloatTensor , derivative: torch.FloatTensor , return_dict: bool = True , ):
        # Second-order (Heun) correction that averages the two derivatives.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise( self , original_samples , noise , timesteps ):
        raise NotImplementedError()
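# A minimal sampling-loop sketch showing how a pipeline drives this scheduler
# (an illustration, not part of this file; `model(x, s)` is a hypothetical
# denoiser with the usual output scaling folded in).
def karras_ve_sample(model , scheduler , sample , num_inference_steps=50 ):
    scheduler.set_timesteps(num_inference_steps )
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        # 1. raise the noise level, 2. Euler step, 3. optional Heun correction
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample , sigma )
        step_output = scheduler.step(model(sample_hat , sigma_hat ) , sigma_hat , sigma_prev , sample_hat )
        if sigma_prev != 0:
            model_output = model(step_output.prev_sample , sigma_prev )
            step_output = scheduler.step_correct(
                model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output.derivative )
        sample = step_output.prev_sample
    return sample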
| 644 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__( self , sql: Union[str, "sqlalchemy.sql.Selectable"] , con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , **kwargs , ):
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )

    def read( self ):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train" , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    """simple docstring"""

    def __init__( self , dataset: Dataset , name: str , con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , batch_size: Optional[int] = None , num_proc: Optional[int] = None , **to_sql_kwargs , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write( self ) -> int:
        # Arguments consumed by the writer itself must not be forwarded
        # to pandas.DataFrame.to_sql.
        self.to_sql_kwargs.pop("sql" , None )
        self.to_sql_kwargs.pop("con" , None )
        index = self.to_sql_kwargs.pop("index" , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written
    def _batch_sql( self , args ):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
    def _write( self , index , **to_sql_kwargs ) -> int:
        written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
                    self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
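# A minimal round-trip sketch of the public API these classes back
# (Dataset.from_sql / Dataset.to_sql in the datasets library; the table name
# and database path are illustrative).
def demo_sql_round_trip():
    import sqlite3

    from datasets import Dataset

    con = sqlite3.connect("example.db" )
    Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]} ).to_sql("my_table" , con )
    ds = Dataset.from_sql("SELECT * FROM my_table" , con )
    return ds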
| 37 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name , pytorch_dump_folder_path , base_model=True ):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('''facebookresearch/dino:main''' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
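# A self-contained sketch of the q/k/v split performed by read_in_q_k_v above:
# timm stores attention as one fused (3*hidden, hidden) projection, while the
# HF model expects three (hidden, hidden) matrices. Shapes are illustrative.
def demo_qkv_split():
    hidden = 4
    qkv = torch.arange(3 * hidden * hidden , dtype=torch.float32 ).reshape(3 * hidden , hidden )
    q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v] ) , qkv )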
| 506 |
import math
from datetime import datetime, timedelta
def gauss_easter(year ) -> datetime:
    # Gauss's Easter algorithm: compute the date of Easter Sunday for a year.
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
UpperCamelCase : Tuple = """will be""" if year > datetime.now().year else """was"""
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 37 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__( self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ) -> None:
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="rb" , protocol=target_protocol , compression=self.compression , client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs" , {} ),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("::" )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol( cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("/" )

    def _get_dirs( self ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat( self , path: str ):
        return self.file.open().read()

    def _open( self , path: str , mode: str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__( self , fo: str , mode: str = "rb" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , block_size: int = DEFAULT_BLOCK_SIZE , **kwargs , ) -> None:
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__( self , file_ ):
                self._file = file_

            def __enter__( self ):
                self._file.__enter__()
                return self

            def __exit__( self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )

            def __iter__( self ):
                return iter(self._file )

            def __next__( self ):
                return next(self._file )

            def __getattr__( self , attr ):
                return getattr(self._file , attr )

        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )

        self.file.__enter__ = fixed_enter
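# A minimal usage sketch for the gzip filesystem above (the file path is
# illustrative; the datasets library registers these classes with fsspec so
# the chained-URL form "gzip://file.txt::<path>.gz" also works).
def demo_gzip_fs():
    fs = GzipFileSystem(fo="./data.txt.gz" )
    with fs.open("data.txt" , "rb" ) as f:
        return f.read()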
| 131 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model ):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

    def __init__( self , module: nn.Module , rank: int ):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )

    def forward( self , input , *args , **kwargs ):
        return self.module(input , *args , **kwargs ) + self.adapter(input )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
    model_name = 'bigscience/bloom-1b7'

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    MAX_NEW_TOKENS = 10
def _UpperCamelCase( self : Dict ):
# Models and tokenizer
a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Union[str, Any] ):
super().setUp()
# Models and tokenizer
a__ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : List[Any] ):
a__ : str = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) )
a__ : Optional[Any] = config.to_dict()
a__ : int = config.to_diff_dict()
a__ : List[str] = config.to_json_string()
def _UpperCamelCase( self : int ):
from bitsandbytes.nn import Paramsabit
a__ : List[Any] = self.model_fpaa.get_memory_footprint()
a__ : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a__ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCamelCase( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCamelCase( self : str ):
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[Any] = BitsAndBytesConfig()
a__ : Optional[int] = True
a__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" )
a__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : Dict ):
with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : int = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase__ ):
a__ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _UpperCamelCase( self : int ):
with self.assertRaises(lowerCamelCase__ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a__ : int = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Any = self.model_fpaa.to(torch.floataa )
a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.to("cpu" )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
a__ : Dict = self.model_fpaa.float()
def _UpperCamelCase( self : Dict ):
a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCamelCase( cls : str ):
a__ : Dict = "t5-small"
a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
a__ : int = AutoTokenizer.from_pretrained(cls.model_name )
a__ : str = "Translate in German: Hello, my dog is cute"
def _UpperCamelCase( self : Optional[int] ):
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Optional[int] ):
from transformers import TaForConditionalGeneration
a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
a__ : Optional[Any] = None
# test with `t5-small`
a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Any = model.generate(**lowerCamelCase__ )
a__ : Union[str, Any] = modules
def _UpperCamelCase( self : List[Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Optional[int] = model.generate(**lowerCamelCase__ )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : List[str] ):
super().setUp()
# model_name
a__ : Union[str, Any] = "bigscience/bloom-560m"
a__ : Union[str, Any] = "t5-small"
# Different types of model
a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Sequence classification model
a__ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# CausalLM model
a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Seq2seq model
a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Union[str, Any] ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
super().setUp()
def _UpperCamelCase( self : int ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Tuple ):
a__ : int = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a__ : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Tuple ):
super().setUp()
def _UpperCamelCase( self : List[Any] ):
a__ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Any = "facebook/opt-350m"
super().setUp()
def _UpperCamelCase( self : int ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a__ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a__ : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase__ ) ):
a__ : Dict = LoRALayer(module.q_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.k_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a__ : Optional[Any] = model.forward(**lowerCamelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( A__ ):
"""simple docstring"""
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
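# A minimal end-to-end sketch of what these tests exercise (assumes a CUDA
# GPU with bitsandbytes installed; the model id is illustrative).
def demo_4bit_generation():
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m" )
    model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m" , load_in_4bit=True , device_map="auto" )
    ids = tok("Hello my name is" , return_tensors="pt" ).to(0 )
    print(tok.decode(model.generate(**ids , max_new_tokens=10 )[0] ) )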
| 37 | 0 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op , got_ver , want_ver , requirement , pkg , hint ) -> None:
    """simple docstring"""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}." )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def require_version(requirement , hint = None ) -> None:
    """simple docstring"""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(R"^[\w_\-\d]+$" , requirement ):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(R"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , requirement )
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}" )
        pkg, want_full = match[0]
        want_range = want_full.split("," )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R"^([\s!=<>]{1,2})(.+)" , w )
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}" )

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The \'{requirement}\' distribution was not found and is required by this application. {hint}" )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core(requirement ) -> None:
    """simple docstring"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement , hint )
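# Usage sketch for the checkers above (requirement strings use pip syntax;
# the pins here are illustrative).
def demo_require_version():
    require_version("python>=3.7.0" )
    require_version_core("numpy>=1.17" )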
| 77 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A__ :
"""simple docstring"""
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def _UpperCamelCase( self : int ):
a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Optional[Any] = None
a__ : Tuple = None
if self.use_labels:
a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCamelCase( self : Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
a__ : str = BeitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ):
a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ):
a__ : List[str] = self.type_sequence_label_size
a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a__ : Optional[Any] = 1
a__ : List[str] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ):
a__ : int = self.num_labels
a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _UpperCamelCase( self : Optional[int] ):
a__ : Any = self.prepare_config_and_inputs()
a__, a__, a__, a__ : Union[str, Any] = config_and_inputs
a__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _UpperCamelCase( self : Any ):
a__ : int = BeitModelTester(self )
a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _UpperCamelCase( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _UpperCamelCase( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase( self : Dict ):
pass
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def _UpperCamelCase( self : str ):
a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = model_class(lowerCamelCase__ )
a__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
if not self.model_tester.is_training:
return
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]:
continue
a__ : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : Tuple = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : Tuple ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ : List[Any] = False
a__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
a__ : Optional[Any] = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : int = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : List[str] ):
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Dict = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
a__ : str = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _UpperCamelCase( self : Optional[int] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCamelCase_ ( ) -> Any:
a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _UpperCamelCase( self : str ):
a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ )
a__ : Optional[Any] = self.default_image_processor
a__ : Dict = prepare_img()
a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ )
# prepare bool_masked_pos
a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ )
a__ : Tuple = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) )
@slow
def _UpperCamelCase( self : Dict ):
a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ )
a__ : int = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Union[str, Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : Any ):
a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
lowerCamelCase__ )
a__ : str = self.default_image_processor
a__ : List[str] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Dict = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Optional[int] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Optional[Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : Tuple = model.to(lowerCamelCase__ )
a__ : List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : Union[str, Any] = Image.open(ds[0]["file"] )
a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Tuple = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
a__ : Dict = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowerCamelCase__ , )
else:
a__ : Dict = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def _UpperCamelCase( self : Tuple ):
a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : List[Any] = model.to(lowerCamelCase__ )
a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : str = Image.open(ds[0]["file"] )
a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : List[Any] = model(**lowerCamelCase__ )
a__ : Any = outputs.logits.detach().cpu()
a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] )
a__ : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
a__ : Any = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
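# A compact inference sketch of the classification path covered above
# (assumes torch and vision deps are available and downloads weights on
# first run; the image URL is the COCO sample used throughout this file).
def demo_beit_classification():
    import requests

    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" )
    model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=True ).raw )
    with torch.no_grad():
        logits = model(**processor(images=image , return_tensors="pt" ) ).logits
    print(model.config.id2label[logits.argmax(-1 ).item()] )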
| 37 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
lowerCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 5_1_2,
"""squeezebert/squeezebert-mnli""": 5_1_2,
"""squeezebert/squeezebert-mnli-headless""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        # [CLS] A [SEP] -> all zeros; [CLS] A [SEP] B [SEP] -> zeros then ones.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 498 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def rename_key(key ):
    regex = R"\w+[.]\d+"
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict ):
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model , init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key) )
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
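# --- Usage sketch (editor's addition, hedged): `rename_key` rewrites indexed
# module names ("block.0") into underscore form ("block_0") so Flax's
# dot-based dict flattening keeps each module name intact. The key below is
# hypothetical, chosen only for illustration.
# rename_key("down_blocks.1.attentions.0.proj.weight")
# -> "down_blocks_1.attentions_0.proj.weight"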
| 37 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    """A small convolutional neural network implemented from scratch with numpy."""

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        # conv1_get: [kernel_size, kernel_number, step]; size_p1: pooling size;
        # bp_num1/2/3: units of flatten, hidden and output layers;
        # rate_w / rate_t: learning rates for weights and thresholds
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp3) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp1, self.num_bp2) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save all model parameters with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # rebuild a CNN instance from a pickled parameter dict
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        # round to three decimal places
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        # (the original snippet called self.Expand_Mat here; _expand_mat is the method defined below)
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expand three-dimensional data to a one-dimensional list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a matrix to a one-dimensional list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # back-propagate the pooled gradients onto the full feature maps
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training (the original default `draw_e=bool` was always truthy; False is the safer default)
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the data of image after convolution and pooling so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass | 523 |
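# --- Usage sketch (editor's addition, hedged): a toy forward pass through the
# CNN class above. Sizes are made up for illustration: a 12x12 input with two
# 3x3 kernels at stride 1 gives 10x10 feature maps; 2x2 average pooling gives
# 5x5, so the flatten layer has 2 * 5 * 5 = 50 units.
# cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=50, bp_num2=20, bp_num3=1)
# image = np.random.rand(12, 12)
# conved, pooled = cnn.convolution(image)
# print(np.shape(conved), np.shape(pooled))  # (2, 10, 10) (2, 5, 5)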
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 37 | 0 |
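# --- Usage sketch (editor's addition, hedged): the programmatic equivalent of
# the CLI entry point above. The field names (models, batch_sizes,
# sequence_lengths) come from BenchmarkArguments and may differ by version.
# from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
# args = TensorFlowBenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[128])
# TensorFlowBenchmark(args=args).run()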
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of every tensor in a (possibly nested) tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('''Not supported''')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat (row-major) index into a multi-dimensional index."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
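# Hedged sanity check (editor's addition): flat index 5 in a (2, 3) batch
# shape is row-major position (1, 2).
# assert _flat_idx_to_idx(5, (2, 3)) == (1, 2)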
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]] = None, end_edges: Optional[Sequence[bool]] = None, ) -> List[Tuple[slice, ...]]:
    """
    Produce an ordered sequence of slices that together select exactly the
    region between the start and end indices (inclusive) of a tensor with
    shape `dims`, modeled as a tree.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False, ) -> Any:
    """Apply `layer` to `inputs` in chunks of `chunk_size` along the flattened batch dims."""
    if not (len(inputs) > 0):
        raise ValueError('''Must provide at least one input''')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )
        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for xa, xb in zip(out, output_chunk):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('''Not supported''')

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 5_1_2, ) -> None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info('''Tuning chunk size...''')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int, ) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
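# --- Usage sketch (editor's addition, hedged): chunk a simple layer over a
# (4, 8) batch; shapes and the chunk size are made up for illustration.
# linear = torch.nn.Linear(16, 16)
# x = torch.randn(4, 8, 16)
# out = chunk_layer(lambda t: linear(t), {"t": x}, chunk_size=4, no_batch_dims=2)
# print(out.shape)  # torch.Size([4, 8, 16])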
| 262 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
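# Hedged sanity check (editor's addition): with k=2, a hypothesis sharing one
# of two gold provenance titles scores len({"a", "b"} & {"b", "c"}) / 2 == 0.5.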
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith("\""):
            title = title[1:]
        if title.endswith("\""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 37 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION ="""\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION ="""
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION ="""
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Squad ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict)
        return score
| 208 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file, eval_file, test_file, tokenizer, label_column_id, max_seq_length = None, ):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="max_length", ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=1_2_8, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
return results
if __name__ == "__main__":
main()
| 37 | 0 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 423 |
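# --- Usage sketch (editor's addition, hedged) for text_justification above:
# text_justification("This is an example of text justification.", 16)
# -> ['This    is    an', 'example  of text', 'justification.  ']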
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help="Predict \"\" if no-answer probability exceeds this (default = 1.0).", )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
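# Hedged sanity check (editor's addition): gold "a b c" vs pred "a b d" share
# two tokens, so precision = recall = 2/3 and F1 = 2/3.
# assert abs(compute_f1("a b c", "a b d") - 2 / 3) < 1e-9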
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 37 | 0 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 53 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the slow and fast CLIP tokenizers."""
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def _UpperCamelCase( self : List[Any] ):
super().setUp()
# fmt: off
a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a__ : Optional[Any] = {"unk_token": "<unk>"}
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ):
a__ : int = "lower newer"
a__ : Optional[int] = "lower newer"
return input_text, output_text
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : int = "lower newer"
a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : int = tokens + [tokenizer.unk_token]
a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@require_ftfy
def _UpperCamelCase( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y"
a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of space type
a__ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of line break type
a__ : Union[str, Any] = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}'''
a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
a__ : Optional[Any] = f''' {text}'''
a__ : str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
def _UpperCamelCase( self : int ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCamelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def _UpperCamelCase( self : int ):
super().test_tokenization_python_rust_equals()
def _UpperCamelCase( self : str ):
# CLIP always lower cases letters
pass
| 37 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__A : int = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def __SCREAMING_SNAKE_CASE ( ) -> Any:
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__a , required=__a , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__a , required=__a , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__a , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__a , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__a , required=__a , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__a , type=__a , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__a , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__a , required=__a , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=__a , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__a , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__a , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__a , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__a , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__a , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=__a , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__a , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__a , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__a , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__a , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__a , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__a , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__a , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__a , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=__a , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__a , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=__a , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__a , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__a , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=__a , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__a , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__a , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__a , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__a , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__a , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__a , default=4000 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(__a )
# ARGS #
init_gpu_params(__a )
set_seed(__a )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    F"""Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"""
                    ''' it. Use `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__a ) , __a , indent=4 )
git_log(args.dump_path )
UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(__a )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(__a )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(__a )
UpperCAmelCase = np.maximum(__a , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(__a )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=__a , data=__a )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a )
else:
UpperCAmelCase = student_model_class(__a )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__a , __a )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__a , __a )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 130 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """
UpperCamelCase : List[Any] = """=======
>>>>>>>
"""
UpperCamelCase : Optional[Any] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
UpperCamelCase : Any = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def UpperCamelCase_ ( __a ) -> Optional[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A__ ( A__ ):
"""simple docstring"""
@staticmethod
def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ):
a__ : List[str] = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ):
a__ : str = get_logger("datasets-cli/converting" )
a__ : Optional[Any] = tfds_path
a__ : Optional[int] = datasets_directory
def _UpperCamelCase( self : int ):
if os.path.isdir(self._tfds_path ):
a__ : List[str] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
a__ : Any = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
a__ : Dict = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
a__ : Tuple = []
a__ : str = []
a__ : List[Any] = {}
if os.path.isdir(self._tfds_path ):
a__ : List[str] = os.listdir(lowerCamelCase__ )
else:
a__ : Union[str, Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(lowerCamelCase__ , encoding="utf-8" ) as f:
a__ : List[Any] = f.readlines()
a__ : Union[str, Any] = []
a__ : Union[str, Any] = False
a__ : Union[str, Any] = False
a__ : Dict = []
for line in lines:
a__ : Optional[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a__ : List[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
a__ : List[str] = ""
continue
elif "from absl import logging" in out_line:
a__ : Dict = "from datasets import logging\n"
elif "getLogger" in out_line:
a__ : List[Any] = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
a__ : List[str] = True
a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" )
out_lines.append(lowerCamelCase__ )
out_lines.append(lowerCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
a__ : Optional[Any] = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a__ : Optional[int] = True
out_lines.append(lowerCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a__ : Dict = f_name.replace(".py" , "" )
a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase__ )
if needs_manual_update:
with_manual_update.append(lowerCamelCase__ )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.writelines(lowerCamelCase__ )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
a__ : Any = os.path.basename(lowerCamelCase__ )
a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(lowerCamelCase__ , lowerCamelCase__ )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 37 | 0 |