def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string into a zigzag (rail fence) grid of
    height `key` and reads the rows left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Rebuilds the zigzag grid from the ciphertext and reads it back in
    zigzag order to recover the plaintext."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypts with every plausible key and returns all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
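# A quick illustration of the three functions above (a minimal sketch; the
# sample string is mine, not from the original module):
#   encrypt("Hello, World!", 3)     -> "Hoo!el,Wrdl l"
#   decrypt("Hoo!el,Wrdl l", 3)     -> "Hello, World!"
#   bruteforce("Hoo!el,Wrdl l")[3]  -> "Hello, World!"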
from typing import Any


def mode(input_list: list[Any]) -> list[Any]:
    """Returns the mode(s), i.e. the most frequently occurring value(s), sorted."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
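# Example (illustrative input, not from the original module): every value that
# attains the maximum count is a mode, and ties are all returned, sorted:
#   mode([2, 3, 4, 5, 3, 4, 2]) -> [2, 3, 4]
#   mode([]) -> []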
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
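# Hedged usage sketch of the pipeline exported above (the checkpoint id is the
# public UniDiffuser release; treat it as an assumption, not part of this file):
#   from diffusers import UniDiffuserPipeline
#   pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
#   image = pipe(prompt="an astronaut riding a horse").images[0]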
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
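# The three test_call_* methods above follow the same pattern for PIL, NumPy and
# PyTorch inputs. A typical invocation (file path assumed from the usual
# transformers test layout, not stated in this file):
#   python -m pytest tests/models/poolformer/test_image_processing_poolformer.py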
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
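# Note on fixtures (a hedged sketch, since the conftest is not in this file):
# tmp_path is pytest's built-in temporary directory; sqlite_path and
# set_sqlalchemy_silence_uber_warning are assumed to come from the datasets
# test conftest, with sqlite_path providing a SQLite database containing a
# "dataset" table of four rows and columns col_1, col_2, col_3.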
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
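# Hedged usage sketch of the pipelines above (the checkpoint name is
# illustrative, not part of this module):
#   from transformers import pipeline
#   summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#   summarizer("A long article ...", max_length=50)
#   # -> [{"summary_text": "..."}]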
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Solves the "rat in a maze" problem: prints a path from the top-left to
    the bottom-right corner if one exists and returns whether it was found."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive backtracking step: tries to extend the path through cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
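# Illustrative call (0 = open cell, 1 = wall; the maze is made up for this example):
#   solve_maze([[0, 1],
#               [0, 0]])
# prints [1, 0] then [1, 1] and returns True: the path hugs the left column,
# then steps right to the exit.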
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a
    tuple of variable-length string symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
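# Hedged usage sketch (the "ctrl" checkpoint id comes from the pretrained map above):
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   tokens = tokenizer.tokenize("Links Hello world")
# BPE splits rare words into "@@ "-joined subwords; convert_tokens_to_string
# reverses that by stripping the "@@ " separators.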
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
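# Hedged sanity check: with the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the property above multiplies out to
#   SEWDConfig().inputs_to_logits_ratio == 5 * 2**6 == 320
# i.e. one logit frame per 320 input samples (20 ms at 16 kHz).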
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})


@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )


@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch


class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
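# Hedged invocation sketch (argument values are illustrative, mirroring the
# research-project README this script shipped with):
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-large-xlsr-turkish-demo \
#       --num_train_epochs 50 --per_device_train_batch_size 16 \
#       --evaluation_strategy steps --fp16 --do_train --do_eval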
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Known task categories, each mapping to its (empty) list of sub-task ids.
# The name `known_task_ids` is an assumption for this deobfuscated module-level dict.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
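# Hedged example of the README front matter this class round-trips (keys are
# illustrative, not an exhaustive schema):
#   ---
#   language: en
#   license: mit
#   train-eval-index:
#     - config: default
#   ---
#   # My dataset
# from_readme() parses the YAML block between the "---" markers; to_readme()
# writes it back above the markdown body.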
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk the dotted key down to the target submodule/parameter
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
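# Example invocation (script and checkpoint filenames are hypothetical; use
# whichever WavLM .pt file you obtained in Steps 1-4 above):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted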
| 123 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:1] = 0  # zero out a strip (reconstructed slice; the exact region in the original test may differ)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # mask out the area above the cat's head

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 123 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs jieba pre-tokenization and XLNet-style SentencePiece tokenization for CPM."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # re-split pieces like "2014," so the digits and the comma stay separate
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
return text
| 423 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
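# The shared helper registers a `--make-reports` option, so a run such as
#
#   python -m pytest --make-reports=latest tests/
#
# is expected to emit grouped failure/duration report files for that run id
# (the exact output location is decided by `pytest_terminal_summary_main`).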
| 423 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns):
    # apply each (tf_name, hf_name) substitution in order
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
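# Example invocation (script name and paths are hypothetical):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus/model.ckpt-300000 \
#       --save_dir ./bigbird-pegasus-pt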
| 551 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
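# With this pattern the heavy torch-backed modules are only imported on first
# attribute access (e.g. `from transformers.models.git import GitModel`),
# while the TYPE_CHECKING branch keeps static type checkers and IDEs aware of
# the eager import paths.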
| 144 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
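# Minimal usage sketch (self-contained; the random tensor stands in for a
# real UNet's noise prediction, so the output is meaningless but it exercises
# the interleaved two-step sampling loop):
#
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       model_output = torch.randn_like(sample)  # stand-in for unet(model_input, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample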
| 183 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 183 | 1 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for pass_num in range(arr_size):
        # even passes compare pairs starting at index 0, odd passes at index 1
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
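# The algorithm alternately compare-swaps the (even, even+1) and (odd, odd+1)
# index pairs; n passes suffice to sort n elements, and because the
# comparisons within a pass are independent it parallelizes naturally (its
# original setting was processor arrays).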
| 159 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR is a scene-text model: tokenization is character level
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
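# Minimal usage sketch (the vocab path is hypothetical; MGP-STR tokenizes at
# the character level):
#
#   tokenizer = MgpstrTokenizer("vocab.json")
#   tokenizer._tokenize("hello")   # -> ['h', 'e', 'l', 'l', 'o']
#
# Characters missing from the vocab fall back to the [GO] (unk) token id.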
| 668 | 0 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensors if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
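# For example, `rename_key` folds indexed module paths into Flax-style names:
#
#   rename_key("down_blocks.0.resnets.1.conv1.weight")
#   # -> "down_blocks_0.resnets_1.conv1.weight"
#
# after which `rename_key_and_reshape_tensor` maps the trailing "weight" to
# "kernel", transposing linear/conv tensors to match Flax layouts.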
| 710 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count the n-digit positive integers that are also nth powers."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
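# The default bounds are exhaustive: 10**n always has n + 1 digits, so no
# base >= 10 ever yields an n-digit nth power, and 9**21 (21 digits) is the
# last qualifying power of the largest admissible base.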
| 644 | 0 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."])
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase__ :Any = {"input_ids": [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = '''valhalla/s2t_mustc_multilinguial_medium'''
    french_text = '''C\'est trop cool'''
    spanish_text = '''Esto es genial'''
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls
    def check_language_codes( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
    def test_vocab_size( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size , 10_000 )
    def test_tokenizer_decode_ignores_language_codes( self ):
        '''simple docstring'''
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
        generated_ids = [ES_CODE, 4, 1_601, 47, 7_647, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_spanish )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_adds_special_tokens( self ):
        '''simple docstring'''
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , FR_CODE )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
    def test_tgt_lang_setter( self ):
        '''simple docstring'''
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 145 |
from __future__ import annotations
def encode( plain : str ) -> list[int]:
    return [ord(elem ) - 9_6 for elem in plain]
def decode( encoded : list[int] ) -> str:
    return "".join(chr(elem + 9_6 ) for elem in encoded )
def main( ) -> None:
    encoded = encode(input("-> " ).strip().lower() )
    print("Encoded: " , encoded )
    print("Decoded:" , decode(encoded ) )
if __name__ == "__main__":
    main()
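# Minimal round-trip sketch (added illustration of the a=1 ... z=26 mapping above):
# encode("hello") == [8, 5, 12, 12, 15]
# decode([8, 5, 12, 12, 15]) == "hello"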
| 145 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.0_2 , scope=None , encoder_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
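        # Worked example with the defaults above (added illustration): image_size=30
        # and patch_size=2 give (30 // 2) ** 2 = 225 patches, so seq_length = 226.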
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_interpolate_pos_encoding( self ):
        """simple docstring"""
        model = ViTModel.from_pretrained("facebook/dino-vits8" ).to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_8_0 )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 3_6_0_1, 3_8_4) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16( self ):
        """simple docstring"""
        model = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.float16 , device_map="auto" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 712 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
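# Example invocation (paths are placeholders and the script filename is assumed;
# the flag names come from the argument parser below):
# python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#     --gpt2_checkpoint_path /path/to/tf_checkpoint \
#     --pytorch_dump_folder_path /path/to/output \
#     --gpt2_config_file /path/to/config.json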
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 602 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    """simple docstring"""
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past , output_from_past , rtol=1E-3 )
def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
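# Illustrative note (added; not part of the original test): with pad_token_id=1,
# an input_ids row [5, 7, 1] yields attention_mask [1, 1, 0] above, while the
# decoder mask always keeps position 0 (the decoder start token) visible.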
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFBlenderbotForConditionalGeneration,
            """feature-extraction""": TFBlenderbotModel,
            """summarization""": TFBlenderbotForConditionalGeneration,
            """text2text-generation""": TFBlenderbotForConditionalGeneration,
            """translation""": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase ):
    """simple docstring"""
    src_text = ["""My friends are cool but they eat too many carbs."""]
    model_name = """facebook/blenderbot-400M-distill"""
    @cached_property
    def tokenizer( self ):
        """simple docstring"""
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        """simple docstring"""
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_generation_from_long_input( self ):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 101 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput( BaseOutput ):
    """simple docstring"""
    images : Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 181 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """yjernite/retribert-base-uncased""": (
            """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """yjernite/retribert-base-uncased""": (
            """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """yjernite/retribert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    """yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class RetriBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
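    # Illustrative layout (added note, not in the original file): a single
    # sequence is encoded as [CLS] A [SEP] with token_type_ids all 0, while a
    # pair is [CLS] A [SEP] B [SEP] with 0s for segment A and 1s for segment B.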
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 709 |
def print_pascal_triangle( num_rows ):
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=" " )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=" " )
            else:
                print(triangle[row_idx][col_idx] , end="" )
        print()
def generate_pascal_triangle( num_rows ):
    if not isinstance(num_rows , int ):
        raise TypeError("The input value of 'num_rows' should be 'int'" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0" )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
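# Illustrative result (added; not in the original file):
# generate_pascal_triangle(3) == [[1], [1, 1], [1, 2, 1]]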
def populate_current_row( triangle , current_row_idx ):
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0] , current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element( triangle , current_row , current_row_idx , current_col_idx , ):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized( num_rows ):
    if not isinstance(num_rows , int ):
        raise TypeError("The input value of 'num_rows' should be 'int'" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0" )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
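# Note on the optimization above (added): each Pascal row is a palindrome, so
# only the first half is computed and then mirrored; e.g. row index 4 builds
# [1, 4, 6] and appends its reflection [4, 1] to obtain [1, 4, 6, 4, 1].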
def benchmark( ):
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F"{func.__name__}({value})"
        timing = timeit(F"__main__.{call}" , setup="import __main__" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F"{call:38} -- {timing:.4f} seconds" )
    for value in range(15 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 268 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["speech"] )
class SpeechaTextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["speech"] )
| 71 |
_a = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_a = [{"type": "code", "content": INSTALL_CONTENT}]
_a = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 481 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        object_detector = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
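    # Minimal standalone sketch of the call exercised below (added illustration;
    # the tiny random checkpoint produces meaningless scores):
    # detector = pipeline("zero-shot-object-detection",
    #                     model="hf-internal-testing/tiny-random-owlvit-object-detection")
    # detector(image_path, candidate_labels=["cat", "remote"], threshold=0.0)
    # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., ...}}, ...]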
    def run_pipeline_test( self , object_detector , examples ):
        """simple docstring"""
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    "score": ANY(float ),
                    "label": ANY(str ),
                    "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                }
                for i in range(n )
            ] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf( self ):
        """simple docstring"""
        pass
    @require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        object_detector = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.72_35, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.72_18, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.71_84, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.67_48, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.66_56, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.66_14, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.64_56, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.6_42, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.64_19, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.72_35, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.72_18, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.71_84, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.67_48, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.66_56, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.66_14, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.64_56, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.6_42, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.64_19, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt( self ):
        """simple docstring"""
        object_detector = pipeline("zero-shot-object-detection" )
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.28_68, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.28_68, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf( self ):
        """simple docstring"""
        pass
    @require_torch
    @slow
    def test_threshold( self ):
        """simple docstring"""
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection" )
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k( self ):
        """simple docstring"""
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection" )
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
| 552 |
def least_divisible_repunit( divisor : int ):
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
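# Illustrative check (added): the smallest repunit divisible by 7 is
# R(6) = 111111, so least_divisible_repunit(7) == 6.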
def solution( limit : int = 1_00_00_00 ):
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 552 | 1 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname , version , pattern ):
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
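# Illustrative sketch (added; not part of the original script): calling
# update_version_in_file("src/diffusers/__init__.py", "0.19.0", "init")
# rewrites a line such as __version__ = "0.19.0.dev0" into __version__ = "0.19.0".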
def update_version_in_examples(version ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update(version , patch=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
        index += 1
    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def get_version():
    with open(REPLACE_FILES['init'] , 'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
def post_release_work():
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work() | 57 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ):
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
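# Example launch (added; the script name and training flags are placeholders,
# while --num_cores and the positional arguments come from the parser above):
# python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5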
def main( ):
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 511 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch( roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    """simple docstring"""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" , config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        roberta_layer = roberta_sent_encoder.layers[i]
        attention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
__a = roberta.model.classification_heads["mnli"].dense.weight
__a = roberta.model.classification_heads["mnli"].dense.bias
__a = roberta.model.classification_heads["mnli"].out_proj.weight
__a = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__a = roberta.model.encoder.lm_head.dense.weight
__a = roberta.model.encoder.lm_head.dense.bias
__a = roberta.model.encoder.lm_head.layer_norm.weight
__a = roberta.model.encoder.lm_head.layer_norm.bias
__a = roberta.model.encoder.lm_head.weight
__a = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
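# Example invocation (added; placeholder paths and an assumed script filename —
# the flag names come from the parser below):
# python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#     --pytorch_dump_folder_path /path/to/output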
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 525 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
    """simple docstring"""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 525 | 1 |
from __future__ import annotations
def check_polygon(nums : list[float] ) -> bool:
    if len(nums ) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
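
    # Quick sanity check (illustrative values, not from the original file):
    print(check_polygon([3, 4, 5]))   # True  -- a 3-4-5 triangle closes
    print(check_polygon([1, 1, 3]))   # False -- the longest side is too long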
| 651 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
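    # The output is one markdown bullet per story, shaped like
    # (illustrative): * [Story title](https://example.com/story)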
| 651 | 1 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'{args.num} is probably prime')
else:
        quotient = args.num // divisor
print(f'{args.num} = {divisor} * {quotient}')
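
    # Illustration (values chosen here, not part of the original script):
    # pollard_rho(8051) finds a nontrivial factor, 97 or 83, since 8051 = 97 * 83;
    # pollard_rho(17) returns None because 17 is prime.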
| 708 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
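
# Applying the substring substitutions above, an original checkpoint key such
# as "decoder.blocks.0.mlp.0.weight" (illustrative) is renamed to
# "decoder.layers.0.fc1.weight".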
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
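
# make_linear_from_emb ties the LM head to the token embeddings: the returned
# nn.Linear reuses the embedding matrix as its weight (no bias), so applying
# it to a decoder hidden state yields logits over the vocabulary.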
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download directory is an assumption; the call site
    # below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # the download helper returns raw bytes, so deserialize them here
        # (the deserialization point is an assumption; it mirrors torch.load below)
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
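
# Minimal usage sketch (assumes the "RUCAIBox/mvp" checkpoint is reachable):
#
#   tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   batch = tokenizer("Summarize: MVP is a supervised pre-trained model.", return_tensors="pt")
#   # batch contains "input_ids" and "attention_mask", per model_input_names above.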
__SCREAMING_SNAKE_CASE = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor | 708 |
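
# Downstream code can mirror the guard pattern used throughout this module; a
# minimal sketch (the pipeline chosen is just an example from the imports above):
#
#   try:
#       from diffusers import DDPMPipeline  # requires torch to be installed
#   except ImportError:
#       DDPMPipeline = None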
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}")

                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 539 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim))

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature: extracts audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to "read file, make bytes" (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self , lowerCamelCase , lowerCamelCase = None ) -> dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
lowercase__ , lowercase__ : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
lowercase__ : List[str] = xsplitext(lowerCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
lowercase__ : List[str] = token_per_repo_id or {}
lowercase__ : Union[str, Any] = path.split("::" )[-1]
try:
lowercase__ : Optional[Any] = string_to_dict(lowerCamelCase , config.HUB_DATASETS_URL )["repo_id"]
lowercase__ : Dict = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowercase__ : List[Any] = None
with xopen(lowerCamelCase , "rb" , use_auth_token=lowerCamelCase ) as f:
lowercase__ , lowercase__ : List[str] = sf.read(lowerCamelCase )
else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def cast_storage(self, storage) -> pa.StructArray:
        """Cast an Arrow array to the Audio arrow storage type ({"bytes": binary, "path": string})."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage) -> pa.StructArray:
        """Embed the audio bytes into the Arrow storage, reading from local paths where needed."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
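
# --- Usage sketch (illustrative addition, not part of the original file) ---
# A minimal round trip through the feature above, assuming the usual `datasets`
# package layout this file comes from:
#
#   import numpy as np
#   from datasets import Audio
#
#   feature = Audio(sampling_rate=16_000, mono=True)
#   encoded = feature.encode_example(
#       {"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000}
#   )  # -> {"bytes": b"RIFF...", "path": None}
#   decoded = feature.decode_example(encoded)
#   assert decoded["sampling_rate"] == 16_000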
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
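
# --- Note (illustrative addition) ---
# With the `_LazyModule` pattern above, submodules are imported only when one
# of their attributes is first accessed. Assuming the usual transformers
# layout, both lines below yield the same class, but the second defers the
# heavy torch import until attribute access:
#
#   from transformers.models.mobilevit.modeling_mobilevit import MobileViTModel
#   from transformers.models.mobilevit import MobileViTModel  # resolved lazily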
| 679 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    """Collapse indexed PyTorch module names, e.g. "layers.0" -> "layers_0"."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight name to the Flax equivalent and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into nested Flax params, renaming and reshaping weights as needed."""
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
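
# --- Worked example (illustrative addition, assuming a tiny fake state dict) ---
# `rename_key` collapses indexed module names, and `rename_key_and_reshape_tensor`
# transposes linear kernels into Flax's (in_features, out_features) layout:
#
#   import numpy as np
#   print(rename_key("down_blocks.0.proj.weight"))  # down_blocks_0.proj.weight
#
#   pt_tensor = np.ones((4, 8))  # a PyTorch Linear stores (out, in)
#   flax_key, flax_tensor = rename_key_and_reshape_tensor(
#       ("proj", "weight"), pt_tensor, {("proj", "kernel"): np.ones((8, 4))}
#   )
#   assert flax_key == ("proj", "kernel") and flax_tensor.shape == (8, 4)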
| 679 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
                 vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
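
# --- Usage sketch (illustrative addition, run only with flax installed) ---
# The cache checks above verify that incremental decoding matches a full
# forward pass. Outside the tests, the same API is used step by step:
#
#   model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
#   encoder_outputs = model.encode(input_ids)
#   past = model.init_cache(batch_size, max_length, encoder_outputs)
#   # feed one token at a time to model.decode, reusing
#   # `outputs.past_key_values` as `past_key_values` at each step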
| 443 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Compute the Schur complement S = C - B^T A^{-1} B of the block matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)
    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            # passing b in A's position makes the column counts mismatch, so this must raise
            schur_complement(b, a, c)
    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
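
# --- Background (illustrative addition) ---
# For a block matrix M = [[A, B], [B^T, C]] with invertible A, the Schur
# complement S = C - B^T A^{-1} B satisfies det(M) = det(A) * det(S), which is
# exactly what test_schur_complement checks numerically. A tiny hand-checkable
# case:
#
#   a = np.array([[2.0, 0.0], [0.0, 2.0]])
#   b = np.array([[1.0], [1.0]])
#   c = np.array([[3.0]])
#   s = schur_complement(a, b, c)  # 3 - [1 1] A^{-1} [1 1]^T = 2
#   m = np.block([[a, b], [b.T, c]])
#   assert np.isclose(np.linalg.det(m), np.linalg.det(a) * s)  # 8 == 4 * 2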
| 443 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` such that, per latent pixel, only the most probable codebook entries whose cumulative
        probability stays below `truncation_rate` are kept; the rest are set to zero, i.e. log(0) = -inf.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
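
# --- Note (illustrative addition) ---
# A minimal numpy analogue of the truncation idea above: keep, in descending
# order, only the entries whose cumulative probability stays below the rate
# (the method above additionally always keeps the single largest entry):
#
#   import numpy as np
#   p = np.array([0.5, 0.3, 0.15, 0.05])
#   keep = np.cumsum(np.sort(p)[::-1]) < 0.9  # -> [True, True, False, False]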
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. `scale = 1 / 255` to map pixel values into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
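
# --- Usage sketch (illustrative addition) ---
# `ensure_multiple_of` snaps each resized side to the nearest multiple, which
# ViT-style backbones need for patching. For a 480x640 channel-first input,
# size {"height": 384, "width": 384}, keep_aspect_ratio=True and multiple=32,
# the height scale (0.8) wins ("scale as little as possible") and both sides
# are rounded to multiples of 32, giving (384, 512):
#
#   import numpy as np
#   print(get_resize_output_image_size(
#       np.zeros((3, 480, 640)), output_size=(384, 384),
#       keep_aspect_ratio=True, multiple=32,
#   ))  # -> (384, 512)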
| 386 | 0 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def test_tar_extract_insecurely(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
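
# --- Usage sketch (illustrative addition) ---
# The `Extractor` facade exercised above first sniffs the compression format
# (e.g. from magic numbers or the file extension), then dispatches to the
# matching base extractor:
#
#   from datasets.utils.extract import Extractor
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
#   if fmt is not None:
#       Extractor.extract("archive.tar.gz", "out_dir", fmt)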
| 708 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64,
                 embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
                 initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
a = self.num_choices
a = MobileBertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests( unittest.TestCase ):
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
                    [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
                    [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
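# --- Editor's note: hedged illustration, not part of the original test file ---
# The bound check above is a relative-tolerance comparison. A minimal sketch of
# the same idea on toy tensors, assuming only `torch` is available (the function
# is defined but never called, so importing stays side-effect free):
def _relative_tolerance_sketch():
    import torch
    tolerance = 1e-3
    expected = torch.tensor([1.0e8, 2.0, -3.0e-2])
    actual = expected * (1 + 5e-4)  # within 0.05% of the expected values
    ratio = expected / actual
    # both bounds must hold for every element, mirroring the test above
    assert torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance)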
| 32 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = "gpt_bigcode"
lowerCAmelCase__ : List[Any] = ["past_key_values"]
lowerCAmelCase__ : Any = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self ,vocab_size=5_0257 ,n_positions=1024 ,n_embd=768 ,n_layer=12 ,n_head=12 ,n_inner=None ,activation_function="gelu_pytorch_tanh" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.0_2 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=5_0256 ,eos_token_id=5_0256 ,attention_softmax_in_fpaa=True ,scale_attention_softmax_in_fpaa=True ,multi_query=True ,**kwargs ,):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs ) | 125 |
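# --- Editor's note: hedged illustration, not part of the original file ---
# The `attribute_map` above lets callers read canonical names (e.g. `hidden_size`)
# that are stored under model-specific names (e.g. `n_embd`). A minimal,
# self-contained sketch of that aliasing mechanism (class and values invented):
class _AttributeMapSketch:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self):
        self.n_embd = 768

    def __getattr__(self, name):
        # __getattr__ only runs when normal lookup fails, so real attributes win
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

# _AttributeMapSketch().hidden_size evaluates to 768 via the alias.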
'''simple docstring'''
def nand_gate(input_a: int , input_b: int) -> int:
    '''simple docstring'''
    return int((input_a, input_b).count(0) != 0)
def test_nand_gate() -> None:
    '''simple docstring'''
    assert nand_gate(0 , 0) == 1
    assert nand_gate(0 , 1) == 1
    assert nand_gate(1 , 0) == 1
    assert nand_gate(1 , 1) == 0
if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1)) | 125 | 1 |
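# --- Editor's note: hedged illustration, not part of the original file ---
# NAND is functionally complete; for example, AND is just a negated NAND.
# A self-contained sketch (the helper `_nand` mirrors `nand_gate` above):
def _nand(a: int, b: int) -> int:
    return int((a, b).count(0) != 0)

def and_gate_from_nand(a: int, b: int) -> int:
    first = _nand(a, b)
    return _nand(first, first)  # NAND(x, x) behaves as NOT x

assert and_gate_from_nand(1, 1) == 1 and and_gate_from_nand(1, 0) == 0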
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self, in_channels, out_channels, kernel_size = 3, stride = 1, groups = 1, activation = "relu", ):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Convad(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False, )
        self.normalization = nn.BatchNormad(out_channels )
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()
    def forward( self, hidden_state ):
        '''simple docstring'''
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetEmbeddings( nn.Module ):
    '''simple docstring'''
    def __init__( self, config ):
        '''simple docstring'''
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act )
        self.num_channels = config.num_channels
    def forward( self, pixel_values ):
        '''simple docstring'''
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class RegNetShortCut( nn.Module ):
    '''simple docstring'''
    def __init__( self, in_channels, out_channels, stride = 2 ):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Convad(in_channels, out_channels, kernel_size=1, stride=stride, bias=False )
        self.normalization = nn.BatchNormad(out_channels )
    def forward( self, hidden_state ):
        '''simple docstring'''
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class RegNetSELayer( nn.Module ):
    '''simple docstring'''
    def __init__( self, in_channels, reduced_channels ):
        '''simple docstring'''
        super().__init__()
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) )
        self.attention = nn.Sequential(
            nn.Convad(in_channels, reduced_channels, kernel_size=1 ), nn.ReLU(), nn.Convad(reduced_channels, in_channels, kernel_size=1 ), nn.Sigmoid(), )
    def forward( self, hidden_state ):
        '''simple docstring'''
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state
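# --- Editor's note: hedged illustration, not part of the original file ---
# Squeeze-and-excitation, as used above: global-average-pool to one value per
# channel, squeeze through a bottleneck, then rescale every channel. A minimal
# standalone sketch with plain torch ops (shapes and names are invented; the
# sigmoid stands in for the full conv bottleneck):
def _se_attention_sketch():
    import torch
    batch, channels, height, width = 2, 8, 4, 4
    hidden = torch.randn(batch, channels, height, width)
    pooled = hidden.mean(dim=(2, 3), keepdim=True)  # (2, 8, 1, 1) "squeeze"
    scale = torch.sigmoid(pooled)                   # per-channel gate in (0, 1)
    out = hidden * scale                            # per-channel reweighting
    assert out.shape == hidden.shape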
class RegNetXLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self, config, in_channels, out_channels, stride = 1 ):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act ), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act ), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None ), )
        self.activation = ACTaFN[config.hidden_act]
    def forward( self, hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetYLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self, config, in_channels, out_channels, stride = 1 ):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act ), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act ), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4 ) ) ), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None ), )
        self.activation = ACTaFN[config.hidden_act]
    def forward( self, hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetStage( nn.Module ):
    '''simple docstring'''
    def __init__( self, config, in_channels, out_channels, stride = 2, depth = 2, ):
        '''simple docstring'''
        super().__init__()
        layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config, in_channels, out_channels, stride=stride, ), *[layer(config, out_channels, out_channels ) for _ in range(depth - 1 )], )
    def forward( self, hidden_state ):
        '''simple docstring'''
        hidden_state = self.layers(hidden_state )
        return hidden_state
class RegNetEncoder( nn.Module ):
    '''simple docstring'''
    def __init__( self, config ):
        '''simple docstring'''
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:] ):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth ) )
    def forward( self, hidden_state, output_hidden_states = False, return_dict = True ):
        '''simple docstring'''
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states )
class RegNetPreTrainedModel( PreTrainedModel ):
    '''simple docstring'''
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self, module ):
        '''simple docstring'''
        if isinstance(module, nn.Convad ):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu' )
        elif isinstance(module, (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight, 1 )
            nn.init.constant_(module.bias, 0 )
    def _set_gradient_checkpointing( self, module, value=False ):
        '''simple docstring'''
        if isinstance(module, RegNetModel ):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel( RegNetPreTrainedModel ):
'''simple docstring'''
    def __init__( self, config ):
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward( self, pixel_values, output_hidden_states = None, return_dict = None ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification( RegNetPreTrainedModel ):
'''simple docstring'''
    def __init__( self, config ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels ) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward( self, pixel_values = None, labels = None, output_hidden_states = None, return_dict = None, ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze() )
                else:
                    loss = loss_fct(logits, labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states )
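# --- Editor's note: hedged illustration, not part of the original file ---
# The `problem_type` dispatch above picks MSE for regression, cross-entropy for
# single-label targets, and BCE-with-logits for multi-label targets. A minimal
# sketch of the three loss choices on toy tensors (defined, not called):
def _problem_type_loss_sketch():
    import torch
    from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
    logits = torch.randn(4, 3)
    mse = MSELoss()(logits, torch.randn(4, 3))                     # regression
    ce = CrossEntropyLoss()(logits, torch.tensor([0, 2, 1, 0]))    # single-label
    bce = BCEWithLogitsLoss()(logits, torch.randint(0, 2, (4, 3)).float())  # multi-label
    return mse, ce, bce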
| 508 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth: int ,node_index: int ,is_max: bool ,scores: list[int] ,height: float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 ,node_index * 2 ,False ,scores ,height ) ,minimax(depth + 1 ,node_index * 2 + 1 ,False ,scores ,height ) ,)
    return min(
        minimax(depth + 1 ,node_index * 2 ,True ,scores ,height ) ,minimax(depth + 1 ,node_index * 2 + 1 ,True ,scores ,height ) ,)
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores ) ,2 )
    print('Optimal value : ' ,end='' )
    print(minimax(0 ,0 ,True ,scores ,height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
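    # --- Editor's note: hedged illustration, not part of the original file ---
    # Worked example on a depth-2 tree: the leaves [3, 5, 2, 9] give
    # max(min(3, 5), min(2, 9)) = max(3, 2) = 3 for a maximizing root.
    assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3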
| 508 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
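# --- Editor's note: hedged illustration, not part of the original file ---
# The `_LazyModule` indirection above defers heavy imports until an attribute is
# first accessed. A minimal standalone sketch of the same idea using a
# module-level __getattr__ (PEP 562); kept as a comment so this init module's
# behavior is unchanged, and the submodule name here is invented:
#
#     import importlib
#
#     _LAZY = {"XLNetConfig": ".configuration_xlnet"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")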
| 61 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester( object ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        '''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output( self , result ):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest( unittest.TestCase ):
    @unittest.skip(reason="""Model not available yet""" )
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        pass
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
| 160 | 0 |
from math import factorial
def solution( n : int = 20 ):
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.') | 97 |
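# --- Editor's note: hedged illustration, not part of the original file ---
# The closed form above is the central binomial coefficient C(2n, n), i.e. the
# number of lattice paths through an n x n grid. A quick cross-check against
# math.comb (defined, not called):
def _lattice_paths_check():
    from math import comb, factorial
    n = 20
    expected = comb(2 * n, n)  # 137846528820 for n = 20
    via_factorials = factorial(2 * n) // (factorial(n) * factorial(n))
    assert expected == via_factorials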
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    '''simple docstring'''
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 97 | 1 |
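# --- Editor's note: hedged illustration, not part of the original script ---
# The checkpoint regex above captures markdown links of the form
# `[name](https://huggingface.co/name)`. A tiny self-contained demo:
def _checkpoint_regex_demo():
    import re
    pattern = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
    text = 'See [bert-base-uncased](https://huggingface.co/bert-base-uncased).'
    assert pattern.findall(text) == [
        ('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')
    ]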
"""simple docstring"""
def solution( numerator = 3 ,denominator = 7 ,limit = 1_00_00_00 ):
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 ,limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
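# --- Editor's note: hedged illustration, not part of the original file ---
# For a small search space the answer can be cross-checked by brute force:
# with limit=8 the reduced fraction immediately left of 3/7 is 2/5, so
# `solution` (defined above) should return numerator 2.
def _left_of_three_sevenths_check():
    from fractions import Fraction
    best = max(
        Fraction(n, d) for d in range(1, 9) for n in range(1, d) if Fraction(n, d) < Fraction(3, 7)
    )
    assert best == Fraction(2, 5) and solution(limit=8) == 2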
| 95 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = torch.device('''cpu''')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
def get_expected_output(swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct ,old ,new ):
    item = dct.pop(old )
    dct[new] = item
def create_rename_keys(state_dict ):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv" ,".point_wise_conv" )
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv" ,".depth_wise_conv" )
        if ".Proj." in k:
            k_new = k_new.replace(".Proj." ,".proj." )
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed" ,"swiftformer.patch_embed.patch_embedding" )
        if "network" in k_new:
            ls = k_new.split("." )
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
            else:
                k_new = k_new.replace("network" ,"swiftformer.encoder.network" )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name ,pytorch_dump_folder_path ,original_ckpt ):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 1_12, 2_20]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 1_68, 2_24]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 1_92, 3_84]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 1_28, 3_20, 5_12]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https" ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt ,map_location="cpu" ,check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt ,map_location="cpu" )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict ,rename_key_src ,rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config" )
    inputs = processor(images=image ,return_tensors="pt" )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs["pixel_values"] ).logits
    assert hf_logits.shape == torch.Size([1, 10_00] )
    assert torch.allclose(hf_logits[0, 0:5] ,timm_logits ,atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
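# --- Editor's note: hedged illustration, not part of the original script ---
# `rename_key`/`create_rename_keys` remap original checkpoint keys onto the
# HuggingFace layout. A toy demo of the in-place pop-and-reinsert pattern,
# using an invented key name:
def _rename_key_demo():
    state = {"patch_embed.0.weight": 1}
    value = state.pop("patch_embed.0.weight")
    state["swiftformer.patch_embed.patch_embedding.0.weight"] = value
    assert "patch_embed.0.weight" not in state and len(state) == 1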
| 95 | 1 |
'''simple docstring'''
lowerCAmelCase_ : List[str] = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase_ : int = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase_ : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 709 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt ,class_data_dir ,num_class_images ):
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''' , total=num_class_images )
    with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fb, open(
        F"""{class_data_dir}/images.txt""" , '''w''' ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fb.write(images['''url'''] + '''\n''' )
                    fc.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 464 | 0 |
from __future__ import annotations
def comp_and_swap( array: list[int] , index_a: int , index_b: int , direction: int ) -> None:
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a] , array[index_b] = array[index_b], array[index_a]
def bitonic_merge( array: list[int] , low: int , length: int , direction: int ) -> None:
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array: list[int] , low: int , length: int , direction: int ) -> None:
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 31 |
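# --- Editor's note: hedged illustration, not part of the original file ---
# Bitonic sort assumes the input length is a power of two, since every merge
# splits the range exactly in half. A quick check using `bitonic_sort` from
# above (defined, not called):
def _bitonic_sort_check():
    data = [7, 1, 4, 0, 8, 3, 5, 2]      # length 8 = 2**3
    bitonic_sort(data, 0, len(data), 1)  # direction 1 -> ascending
    assert data == sorted(data)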
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
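# --- Editor's note: hedged illustration, not part of the original module ---
# `trim_batch` drops any column that is padding in every row. A worked example
# (defined but not called, so importing this module stays side-effect free):
def _trim_batch_demo():
    import torch
    pad = 0
    batch = torch.tensor([[5, 6, pad, pad],
                          [7, pad, pad, pad]])
    trimmed = trim_batch(batch, pad)
    # column 1 survives because the first row still has a real token there
    assert trimmed.tolist() == [[5, 6], [7, pad]]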
class SeqaSeqDataset( Dataset ):
    """simple docstring"""
    def __init__(self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__(self ):
return len(self.src_lens )
    def __getitem__(self , index ):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
@staticmethod
    def get_char_lens(data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn(self , batch ):
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list( summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path ):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f , x ):
    return list(map(f , x ) )
def pickle_save( obj , path ):
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def __UpperCamelCase ( snake_case__ ):
def remove_articles(snake_case__ ):
return re.sub(R"""\b(a|an|the)\b""" , """ """ , snake_case__ )
def white_space_fix(snake_case__ ):
return " ".join(text.split() )
def remove_punc(snake_case__ ):
A_ : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(snake_case__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__ ) ) ) )
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Tuple = normalize_answer(snake_case__ ).split()
A_ : Dict = normalize_answer(snake_case__ ).split()
A_ : int = Counter(snake_case__ ) & Counter(snake_case__ )
A_ : Dict = sum(common.values() )
if num_same == 0:
return 0
A_ : str = 1.0 * num_same / len(snake_case__ )
A_ : Any = 1.0 * num_same / len(snake_case__ )
A_ : Union[str, Any] = (2 * precision * recall) / (precision + recall)
return fa
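# Worked example for the F1 above: prediction "the cat sat" vs reference
# "a cat sat down" normalizes to ["cat", "sat"] and ["cat", "sat", "down"],
# so num_same = 2, precision = 2/2 = 1.0, recall = 2/3 and
# F1 = 2*P*R/(P+R) = (4/3)/(5/3) = 0.8.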
def __UpperCamelCase ( snake_case__ , snake_case__ ):
return normalize_answer(snake_case__ ) == normalize_answer(snake_case__ )
def __UpperCamelCase ( snake_case__ , snake_case__ ):
assert len(snake_case__ ) == len(snake_case__ )
A_ : Optional[Any] = 0
for hypo, pred in zip(snake_case__ , snake_case__ ):
em += exact_match_score(snake_case__ , snake_case__ )
if len(snake_case__ ) > 0:
em /= len(snake_case__ )
return {"em": em}
def __UpperCamelCase ( snake_case__ ):
return model_prefix.startswith("""rag""" )
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : Any = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : List[Any] = """dropout_rate"""
for p in extra_params:
if getattr(snake_case__ , snake_case__ , snake_case__ ):
if not hasattr(snake_case__ , snake_case__ ) and not hasattr(snake_case__ , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(snake_case__ ) )
delattr(snake_case__ , snake_case__ )
continue
A_ : Dict = p if hasattr(snake_case__ , snake_case__ ) else equivalent_param[p]
setattr(snake_case__ , snake_case__ , getattr(snake_case__ , snake_case__ ) )
delattr(snake_case__ , snake_case__ )
return hparams, config
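# Usage sketch (assuming the helper above is the usual `set_extra_model_params`;
# the parameter list is illustrative): drop or remap knobs a given config does
# not define, e.g. `dropout` -> `dropout_rate` for T5:
#
#     hparams, config = set_extra_model_params(
#         ["encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout"],
#         hparams, config)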
| 180 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase_ = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''PoolFormerFeatureExtractor''']
lowerCamelCase_ = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
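# Lazy-import note (a sketch): with `_LazyModule` this __init__ only registers
# names; the torch-gated classes are materialized on first attribute access, e.g.
#
#     from transformers import PoolFormerConfig   # config import is always cheap
#     from transformers import PoolFormerModel    # resolves the torch-gated import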
| 700 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def snake_case ( A__ ):
UpperCAmelCase_ : Tuple = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : List[Any] = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def snake_case ( A__ ):
UpperCAmelCase_ : Union[str, Any] = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def snake_case ( ):
UpperCAmelCase_ : str = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : int = "imagenet-1k-id2label.json"
UpperCAmelCase_ : Union[str, Any] = 10_00
UpperCAmelCase_ : Any = "huggingface/label-files"
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(A__ ,A__ ,repo_type="dataset" ) ) ,"r" ) )
UpperCAmelCase_ : Optional[int] = {int(A__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[int] = idalabel
UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : str = CvtConfig(num_labels=A__ ,idalabel=A__ ,labelaid=A__ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" ,1 )[-1][4:6] == "13":
UpperCAmelCase_ : Optional[int] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" ,1 )[-1][4:6] == "21":
UpperCAmelCase_ : str = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
UpperCAmelCase_ : Optional[int] = [2, 2, 20]
UpperCAmelCase_ : List[str] = [3, 12, 16]
UpperCAmelCase_ : Optional[Any] = [1_92, 7_68, 10_24]
UpperCAmelCase_ : Tuple = CvtForImageClassification(A__ )
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : List[str] = torch.load(A__ ,map_location=torch.device("cpu" ) )
UpperCAmelCase_ : int = OrderedDict()
UpperCAmelCase_ : Optional[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCAmelCase_ : Optional[Any] = list_of_state_dict + cls_token(A__ )
UpperCAmelCase_ : Any = list_of_state_dict + embeddings(A__ )
for cnt in range(config.depth[idx] ):
UpperCAmelCase_ : Dict = list_of_state_dict + attention(A__ ,A__ )
UpperCAmelCase_ : Union[str, Any] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A__ )
for i in range(len(A__ ) ):
UpperCAmelCase_ : Any = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
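# Example invocation (a sketch; the script name and paths are placeholders,
# the flags match the argparse definitions above):
#
#     python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#         --pytorch_dump_folder_path ./cvt-w24-384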
| 463 | 0 |
import cva
import numpy as np
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : float , _snake_case : int ):
"""simple docstring"""
if k in (0.04, 0.06):
A__ = k
A__ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : Any ):
"""simple docstring"""
return str(self.k )
def _a ( self : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
A__ = cva.imread(_snake_case , 0 )
A__ , A__ = img.shape
A__ = []
A__ = img.copy()
A__ = cva.cvtColor(_snake_case , cva.COLOR_GRAY2RGB )
A__ , A__ = np.gradient(_snake_case )
A__ = dx**2
A__ = dy**2
A__ = dx * dy
A__ = 0.04
A__ = self.window_size // 2
for y in range(_snake_case , h - offset ):
for x in range(_snake_case , w - offset ):
A__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ = (wxx * wyy) - (wxy**2)
A__ = wxx + wyy
A__ = det - k * (trace**2)
# threshold on the corner response r; the 0.5 cutoff can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = HarrisCorner(0.04, 3)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
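# Math note: per window the code accumulates the structure tensor
#     M = [[wxx, wxy], [wxy, wyy]]
# and scores corners with the Harris response
#     r = det(M) - k * trace(M)**2 = (wxx*wyy - wxy**2) - k * (wxx + wyy)**2
# large positive r -> corner, large negative r -> edge, |r| near 0 -> flat region.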
| 9 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Union[str, Any]:
__snake_case = torch.load(_UpperCAmelCase , map_location="cpu" )
if "model" in sd.keys():
__snake_case = torch.load(_UpperCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
__snake_case = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_UpperCAmelCase )
__snake_case = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__snake_case = sd.pop(_UpperCAmelCase )
__snake_case = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__snake_case = sd[key]
# We split QKV in separate Q,K,V
__snake_case = key.replace(".qkv_proj." , ".q_proj." )
__snake_case = key.replace(".qkv_proj." , ".k_proj." )
__snake_case = key.replace(".qkv_proj." , ".v_proj." )
__snake_case = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has the QKV weight separated into K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__snake_case , __snake_case , __snake_case = torch.split(_UpperCAmelCase , depth // 3 , dim=0 )
__snake_case = q
__snake_case = k
__snake_case = v
del sd[key]
return sd
@torch.no_grad()
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int=None ) -> Any:
__snake_case = load_checkpoint(_UpperCAmelCase )
if config is not None:
__snake_case = OPTConfig.from_pretrained(_UpperCAmelCase )
else:
__snake_case = OPTConfig()
__snake_case = OPTModel(_UpperCAmelCase ).half().eval()
model.load_state_dict(_UpperCAmelCase )
# Check results
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
a : Optional[int] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
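# Example invocation (a sketch; the script name and paths are placeholders,
# the flags match the argparse definitions above):
#
#     python convert_opt_checkpoint.py --fairseq_path ./opt_metasq/restored.pt \
#         --pytorch_dump_folder_path ./opt-hf --hf_config facebook/opt-350m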
| 69 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE__ : List[str] = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 233 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase( lowerCAmelCase__ ):
__snake_case : List[str] = ['image_processor', 'tokenizer']
__snake_case : Optional[int] = 'Pix2StructImageProcessor'
__snake_case : Optional[int] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[Any] = False
super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __call__( self : Tuple , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = 2_048 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **SCREAMING_SNAKE_CASE : str , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.tokenizer
SCREAMING_SNAKE_CASE_ :Tuple = self.tokenizer(
text=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_overflowing_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , return_length=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
SCREAMING_SNAKE_CASE_ :Any = self.image_processor(
SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , max_patches=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
else:
# add pixel_values and bbox
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.image_processor(
SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , max_patches=SCREAMING_SNAKE_CASE , header_text=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if text is not None and not self.image_processor.is_vqa:
SCREAMING_SNAKE_CASE_ :Any = self.tokenizer(
text=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_overflowing_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , return_length=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
if "attention_mask" in text_encoding:
SCREAMING_SNAKE_CASE_ :List[Any] = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
SCREAMING_SNAKE_CASE_ :Any = text_encoding.pop('input_ids' )
else:
SCREAMING_SNAKE_CASE_ :Any = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE )
return encoding_image_processor
def _lowercase ( self : Tuple , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def _lowercase ( self : Tuple , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Tuple = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ :Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
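# Usage sketch (the checkpoint name is an assumption; the class above is the
# Pix2Struct processor wrapping Pix2StructImageProcessor + a T5 tokenizer):
#
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#     inputs = processor(images=image, text="a caption", return_tensors="pt")
#
# as in __call__ above, the tokenizer output is popped and merged into the
# image-processor encoding as the decoder-side inputs.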
| 233 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 128 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any]=13 , SCREAMING_SNAKE_CASE : Optional[int]=30 , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : List[Any]=3 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : str=32 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Union[str, Any]=4 , SCREAMING_SNAKE_CASE : Any=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Tuple=0.1 , SCREAMING_SNAKE_CASE : str=10 , SCREAMING_SNAKE_CASE : str=0.02 , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Dict=2 , ):
_A : Dict = parent
_A : Optional[Any] = batch_size
_A : int = image_size
_A : Tuple = patch_size
_A : Dict = num_channels
_A : Union[str, Any] = is_training
_A : Optional[int] = use_labels
_A : Optional[Any] = hidden_size
_A : Dict = num_hidden_layers
_A : Any = num_attention_heads
_A : int = intermediate_size
_A : Union[str, Any] = hidden_act
_A : Any = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : List[Any] = type_sequence_label_size
_A : Optional[Any] = initializer_range
_A : Optional[Any] = scope
_A : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : int = (image_size // patch_size) ** 2
_A : List[str] = num_patches + 1
def A ( self : Dict):
_A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def A ( self : Union[str, Any]):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int):
_A : int = ViTModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Union[str, Any] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple):
_A : int = ViTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Optional[int] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_A : Any = 1
_A : Optional[Any] = ViTForMaskedImageModeling(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_A : int = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def A ( self : Dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict):
_A : Union[str, Any] = self.type_sequence_label_size
_A : int = ViTForImageClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : List[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_A : Any = 1
_A : str = ViTForImageClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_A : Any = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A ( self : str):
_A : Dict = self.prepare_config_and_inputs()
_A , _A , _A = config_and_inputs
_A : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
a = True
a = False
a = False
a = False
def A ( self : str):
_A : Optional[int] = ViTModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37)
def A ( self : Dict):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def A ( self : Optional[int]):
pass
def A ( self : Any):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear))
def A ( self : Any):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[str] = model_class(SCREAMING_SNAKE_CASE)
_A : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE)
def A ( self : Optional[Any]):
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE)
def A ( self : Dict):
_A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE)
def A ( self : str):
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE)
@slow
def A ( self : int):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTModel.from_pretrained(SCREAMING_SNAKE_CASE)
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( ):
_A : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : Tuple):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def A ( self : str):
_A : Optional[int] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(SCREAMING_SNAKE_CASE)
_A : List[str] = self.default_image_processor
_A : List[str] = prepare_img()
_A : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt').to(SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
_A : str = model(**SCREAMING_SNAKE_CASE)
# verify the logits
_A : int = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE)
_A : int = torch.tensor([-0.2744, 0.8215, -0.0836]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
@slow
def A ( self : str):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# which interpolates the pre-trained position embeddings so the model can be
# used at higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher-resolution images.
_A : int = ViTModel.from_pretrained('facebook/dino-vits8').to(SCREAMING_SNAKE_CASE)
_A : Optional[Any] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480)
_A : Union[str, Any] = prepare_img()
_A : List[str] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt')
_A : List[str] = inputs.pixel_values.to(SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
_A : str = model(SCREAMING_SNAKE_CASE , interpolate_pos_encoding=SCREAMING_SNAKE_CASE)
# verify the logits
_A : Any = torch.Size((1, 3601, 384))
self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE)
_A : Optional[int] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def A ( self : str):
_A : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
_A : List[str] = self.default_image_processor
_A : Any = prepare_img()
_A : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt')
_A : Tuple = inputs.pixel_values.to(SCREAMING_SNAKE_CASE)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_A : Optional[int] = model(SCREAMING_SNAKE_CASE)
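# Note on `interpolate_pos_encoding` (a sketch, mirroring the DINO test above):
#
#     outputs = model(pixel_values, interpolate_pos_encoding=True)
#
# resizes the pre-trained position embeddings so a ViT trained at 224px can
# consume larger inputs: with /8 patches, 480px gives (480/8)**2 + 1 = 3601
# tokens, matching the (1, 3601, 384) shape asserted above.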
| 128 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowercase = 50003
lowercase = 50002
@require_sentencepiece
@require_tokenizers
class A_ ( snake_case_ , unittest.TestCase ):
UpperCAmelCase__ = PLBartTokenizer
UpperCAmelCase__ = None
UpperCAmelCase__ = False
def _snake_case ( self : Union[str, Any] ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ = PLBartTokenizer(__lowerCamelCase , language_codes="base" , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : int ) -> Dict:
__magic_name__ = PLBartTokenizer(__lowerCamelCase , language_codes="base" , keep_accents=__lowerCamelCase )
__magic_name__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__magic_name__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__magic_name__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__magic_name__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
__magic_name__ = tokenizer.vocab_size
__magic_name__ = [tokenizer.convert_ids_to_tokens(__lowerCamelCase ) for x in range(end - 4 , __lowerCamelCase )]
self.assertListEqual(__lowerCamelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
__magic_name__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
__magic_name__ = tokenizer(__lowerCamelCase ).input_ids
self.assertEqual(
tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) , __lowerCamelCase , )
def _snake_case ( self : Union[str, Any] ) -> str:
__magic_name__ = PLBartTokenizer(__lowerCamelCase , language_codes="multi" , keep_accents=__lowerCamelCase )
__magic_name__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__magic_name__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__magic_name__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__magic_name__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
__magic_name__ = tokenizer.vocab_size
__magic_name__ = [tokenizer.convert_ids_to_tokens(__lowerCamelCase ) for x in range(end - 7 , __lowerCamelCase )]
self.assertListEqual(
__lowerCamelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
__magic_name__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
__magic_name__ = tokenizer(__lowerCamelCase ).input_ids
self.assertEqual(
tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) , __lowerCamelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
UpperCAmelCase__ = '''uclanlp/plbart-python-en_XX'''
UpperCAmelCase__ = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCAmelCase__ = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCAmelCase__ = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def _snake_case ( cls : List[str] ) -> List[str]:
__magic_name__ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
__magic_name__ = 1
return cls
def _snake_case ( self : Union[str, Any] ) -> Tuple:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def _snake_case ( self : List[Any] ) -> Tuple:
__magic_name__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
def _snake_case ( self : Tuple ) -> Dict:
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
__magic_name__ = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
__magic_name__ = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
__magic_name__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def _snake_case ( self : List[str] ) -> Union[str, Any]:
__magic_name__ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , __lowerCamelCase )
__magic_name__ = 1_0
__magic_name__ = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
def _snake_case ( self : Any ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def _snake_case ( self : Optional[int] ) -> Any:
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = PLBartTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase )
@require_torch
def _snake_case ( self : Union[str, Any] ) -> Dict:
__magic_name__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="pt" )
__magic_name__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __lowerCamelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _snake_case ( self : str ) -> Union[str, Any]:
__magic_name__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__magic_name__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
__magic_name__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _snake_case ( self : List[Any] ) -> List[str]:
__magic_name__ = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="pt" )
__magic_name__ = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0 , return_tensors="pt" )
__magic_name__ = targets["input_ids"]
__magic_name__ = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def _snake_case ( self : Tuple ) -> List[Any]:
__magic_name__ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
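# Convention note (a sketch, grounded in the assertions above): PLBart source
# sequences end with [eos, source_lang_code] (here [2, __python__]) and
# generation toward a target language is steered via forced_bos_token_id, e.g.
#
#     bos = tokenizer.fairseq_tokens_to_ids["__en_XX__"]   # 50003 above
#     out = model.generate(**inputs, forced_bos_token_id=bos)
#
# where `model` is a hypothetical PLBart seq2seq model loaded separately.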
| 721 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class A_ :
UpperCAmelCase__ = LEDConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = '''gelu'''
def __init__( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=1_3 , __lowerCamelCase : str=7 , __lowerCamelCase : Any=True , __lowerCamelCase : int=False , __lowerCamelCase : List[Any]=9_9 , __lowerCamelCase : Any=3_2 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Dict=4 , __lowerCamelCase : int=3_7 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=2_0 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : str=1 , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : Dict=4 , ) -> Optional[Any]:
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = eos_token_id
__magic_name__ = pad_token_id
__magic_name__ = bos_token_id
__magic_name__ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__magic_name__ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__magic_name__ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _snake_case ( self : Dict ) -> Optional[Any]:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__magic_name__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__magic_name__ = tf.concat([input_ids, eos_tensor] , axis=1 )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__magic_name__ = prepare_led_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__magic_name__ = tf.concat(
[tf.zeros_like(__lowerCamelCase )[:, :-1], tf.ones_like(__lowerCamelCase )[:, -1:]] , axis=-1 , )
__magic_name__ = global_attention_mask
return config, inputs_dict
def _snake_case ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any ) -> Union[str, Any]:
__magic_name__ = TFLEDModel(config=__lowerCamelCase ).get_decoder()
__magic_name__ = inputs_dict["input_ids"]
__magic_name__ = input_ids[:1, :]
__magic_name__ = inputs_dict["attention_mask"][:1, :]
__magic_name__ = 1
# first forward pass
__magic_name__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
__magic_name__ , __magic_name__ = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
__magic_name__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
__magic_name__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
__magic_name__ = tf.concat([input_ids, next_tokens] , axis=-1 )
__magic_name__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__magic_name__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__magic_name__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__magic_name__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__magic_name__ = output_from_no_past[:, -3:, random_slice_idx]
__magic_name__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 )
def _lowerCAmelCase ( __lowerCamelCase:str , __lowerCamelCase:str , __lowerCamelCase:List[Any] , __lowerCamelCase:Any=None , __lowerCamelCase:Dict=None , __lowerCamelCase:List[Any]=None , __lowerCamelCase:Union[str, Any]=None , ):
'''simple docstring'''
if attention_mask is None:
__magic_name__ = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__magic_name__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__magic_name__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__magic_name__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class A_ ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCAmelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCAmelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _snake_case ( self : int ) -> Optional[int]:
__magic_name__ = TFLEDModelTester(self )
__magic_name__ = ConfigTester(self , config_class=__lowerCamelCase )
def _snake_case ( self : Optional[Any] ) -> Dict:
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) -> List[Any]:
__magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
def _snake_case ( self : List[str] ) -> str:
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = tf.zeros_like(inputs_dict["attention_mask"] )
__magic_name__ = 2
__magic_name__ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
__magic_name__ = True
__magic_name__ = self.model_tester.seq_length
__magic_name__ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__lowerCamelCase : int ):
__magic_name__ = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__lowerCamelCase : Any ):
__magic_name__ = [t.numpy() for t in outputs.encoder_attentions]
__magic_name__ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _snake_case ( self : Union[str, Any] ) -> List[str]:
pass
def _snake_case ( self : int ) -> str:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in an int32 tensor."""
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase : List[str] = HfArgumentParser(_lowerCamelCase )
__lowerCamelCase : Tuple = parser.parse_args_into_dataclasses()[0]
__lowerCamelCase : Optional[int] = TensorFlowBenchmark(args=_lowerCamelCase )
try:
__lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__lowerCamelCase : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__lowerCamelCase : Any = " ".join(str(_lowerCamelCase ).split(" " )[:-1] )
__lowerCamelCase : Dict = ""
__lowerCamelCase : Optional[Any] = eval(str(_lowerCamelCase ).split(" " )[-1] )
__lowerCamelCase : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
__lowerCamelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(_lowerCamelCase )
raise ValueError(_lowerCamelCase )
benchmark.run()
if __name__ == "__main__":
main() | 646 | """simple docstring"""
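# Hedged programmatic usage sketch for the script above (the model name and sizes
# below are illustrative, not part of this script):
#
#     from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     TensorFlowBenchmark(args=args).run()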
"""Segmented sieve of Eratosthenes: find all primes up to ``n`` segment by segment."""
import math


def sieve(n: int) -> list[int]:
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # First sieve the base segment [2, sqrt(n)] to collect the seed primes.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve the remaining range in windows of size sqrt(n), marking multiples
    # of each seed prime inside the current window.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
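# Hedged sanity check: the segmented result should match the primes below 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]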
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training model."""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."})
__UpperCAmelCase = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size for training."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size for evaluation."})
__UpperCAmelCase = field(default=0.1 , metadata={"help": "Value of weight decay."})
__UpperCAmelCase = field(
default=1_0_0_0_0 , metadata={"help": "Size of buffer used to shuffle streaming dataset."})
    __UpperCAmelCase = field(default=2E-4 , metadata={"help": "Learning rate for training."})
    __UpperCAmelCase = field(default="cosine" , metadata={"help": "Learning rate scheduler type."})
__UpperCAmelCase = field(
default=7_5_0 , metadata={"help": "Number of warmup steps in the learning rate schedule."})
__UpperCAmelCase = field(
default=1_6 , metadata={"help": "Number of gradient accumulation steps."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Use gradient checkpointing to reduce memory footprint."})
__UpperCAmelCase = field(default=5_0_0_0_0 , metadata={"help": "Maximum number of training steps."})
__UpperCAmelCase = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
__UpperCAmelCase = field(default=1_0_2_4 , metadata={"help": "Sequence lengths used for training."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Training seed."})
__UpperCAmelCase = field(
default=1_0_2_4 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "States path if the training should continue from a checkpoint folder."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "If True the data is pretokenized."})
@dataclass
class EvaluationArguments:
    """Configuration for evaluating model."""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size used for evaluation."})
__UpperCAmelCase = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
__UpperCAmelCase = field(default=1_0_2_4 , metadata={"help": "Length of sequences to be evaluated."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Random seed used for evaluation."})
@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on HumanEval dataset."""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Number of workers used for code evaluation."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Sample from the language model's output distribution."})
__UpperCAmelCase = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."})
__UpperCAmelCase = field(default=2_5_6 , metadata={"help": "Maximum number of newly generated tokens."})
__UpperCAmelCase = field(default=0 , metadata={"help": "Top-k parameter used for generation."})
__UpperCAmelCase = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."})
__UpperCAmelCase = field(default=1_0 , metadata={"help": "Number of generations to run in parallel."})
__UpperCAmelCase = field(
default=2_0_0 , metadata={"help": "Number of completions to generate for each sample."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Random seed used for evaluation."})
    __UpperCAmelCase = field(
        default="eval_results.json" , metadata={"help": "Output file to save the evaluation results."})
__UpperCAmelCase = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"})
__UpperCAmelCase = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing data."""
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
__UpperCAmelCase = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."})
__UpperCAmelCase = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."})
__UpperCAmelCase = field(
default=1_0_0_0_0_0 , metadata={"help": "Number of files to save per JSON output file."})
__UpperCAmelCase = field(default="content" , metadata={"help": "Column containing text data to process."})
__UpperCAmelCase = field(
default=1_0_0_0 , metadata={"help": "Maximum line length in file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=1_0_0 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."})
__UpperCAmelCase = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "If True, near-duplicate samples are removed."})
__UpperCAmelCase = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."})
@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""
__UpperCAmelCase = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."})
__UpperCAmelCase = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."})
__UpperCAmelCase = field(default="content" , metadata={"help": "Column containing text data to process."})
__UpperCAmelCase = field(default=2_0_0_0_0_0 , metadata={"help": "Number of examples to train tokenizer on."})
    __UpperCAmelCase = field(
        default=3_2_7_6_8 , metadata={"help": "Vocabulary size of the new tokenizer."})
__UpperCAmelCase = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."})
__UpperCAmelCase = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Number of workers used for code evaluation."})
@dataclass
class InitializationArguments:
    """Configuration for initializing new model."""
__UpperCAmelCase = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."})
__UpperCAmelCase = field(default="codeparrot" , metadata={"help": "Name of the created model."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Push saved tokenizer to the hub."})
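# Hedged usage sketch: these dataclasses are meant to be consumed through
# `transformers.HfArgumentParser`; the invocation below is illustrative only:
#
#     from transformers import HfArgumentParser
#
#     parser = HfArgumentParser(TrainingArguments)
#     train_args = parser.parse_args_into_dataclasses()[0]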
def is_isogram(string: str) -> bool:
    """Return True if no letter repeats in ``string`` (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
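# Hedged self-checks for the function above ("Uncopyrightable" is a classic isogram).
assert is_isogram("Uncopyrightable") is True
assert is_isogram("allowed") is False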
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE__ ( self , a , a = "arrow" , a = None , a = None , **a , ) -> List[str]:
self._validate_cache_dir()
SCREAMING_SNAKE_CASE = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
self._repartition_df_if_needed(a)
SCREAMING_SNAKE_CASE = not is_remote_filesystem(self._fs)
SCREAMING_SNAKE_CASE = os.path.join if is_local else posixpath.join
SCREAMING_SNAKE_CASE = '-TTTTT-SSSSS-of-NNNNN'
SCREAMING_SNAKE_CASE = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
SCREAMING_SNAKE_CASE = path_join(self._output_dir , a)
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for task_id, content in self._prepare_split_single(a , a , a):
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards))
all_shard_lengths.extend(a)
SCREAMING_SNAKE_CASE = total_num_examples
SCREAMING_SNAKE_CASE = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''')
if total_shards > 1:
SCREAMING_SNAKE_CASE = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
SCREAMING_SNAKE_CASE = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a , a , a , ):
rename(
a , fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''').replace('NNNNN' , f'''{total_shards:05d}''') , )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
for i in range(len(a)):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = task_id_and_num_shards[i]
for shard_id in range(a):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(a , len(a)).map(lambda a: _rename_shard(*a)).collect()
else:
# don't use any pattern
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , fpath.replace(a , '') , )
    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
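# Hedged usage sketch: in recent `datasets` releases this builder is driven by
# `Dataset.from_spark` (requires an active SparkSession; names below are illustrative):
#
#     from pyspark.sql import SparkSession
#     import datasets
#
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([("a",), ("b",)], "text: string")
#     ds = datasets.Dataset.from_spark(df)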
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : float ) -> float:
if edge <= 0 or not isinstance(lowerCamelCase__, lowerCamelCase__ ):
raise ValueError("Length must be a positive." )
return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _lowerCAmelCase ( lowerCamelCase__ : float ) -> float:
if edge <= 0 or not isinstance(lowerCamelCase__, lowerCamelCase__ ):
raise ValueError("Length must be a positive." )
return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 572 | 0 |
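    # Worked numeric example (hedged): a unit edge gives
    #   surface area 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6457
    #   volume (15 + 7 * sqrt(5)) / 4 ≈ 7.6631
    print(f"{dodecahedron_surface_area(1):.4f}")  # 20.6457
    print(f"{dodecahedron_volume(1):.4f}")  # 7.6631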
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune the walk in place so os.walk skips these directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
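# Illustrative output (hedged example for a tree containing `maths/segmented_sieve.py`):
#
#     ## Maths
#       * [Segmented Sieve](maths/segmented_sieve.py)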
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self , a , a=7 , a=3 , a=18 , a=30 , a=400 , a=True , a=None , a=True , a=None , a=True , ) -> Any:
SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 20}
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = crop_size
SCREAMING_SNAKE_CASE = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1_0_2_4,
    "moussaKam/barthez": 1_0_2_4,
    "moussaKam/barthez-orangesum-title": 1_0_2_4,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    """Construct a BARThez tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase = None , **lowerCAmelCase , ) -> Tuple:
'''simple docstring'''
_lowercase =AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
_lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
_lowercase =vocab_file
_lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
_lowercase ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_lowercase =len(self.sp_model ) - 1
_lowercase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
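# Hedged usage sketch (downloads the checkpoint named in the maps above):
#
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Paris est une belle ville.")["input_ids"]
#     print(tokenizer.decode(ids))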
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew and\n      Dorr, Bonnie and\n      Schwartz, Rich and\n      Micciulla, Linnea and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                 "what about this sentence?",\n        ...                 "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...               ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                 "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                 "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       normalized=True,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                 "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       ignore_punct=True,\n        ...                       case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                 "what about this sentence?",\n        ...                 "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...               ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       ignore_punct=True,\n        ...                       case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and lazy deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
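if __name__ == "__main__":
    # Minimal usage demo of the open-addressing HashMap above (hedged example).
    hm: HashMap[str, int] = HashMap()
    hm["one"] = 1
    hm["two"] = 2
    hm["one"] = 11  # same key overwrites the previous value
    del hm["two"]
    print(hm, len(hm))  # HashMap(one: 11) 1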
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # jiwer reports the rate under the "wer" key; with the character-level
            # transforms above this is in fact the character error rate.
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform, )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform, )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
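# Hedged worked example: reference "data" vs. prediction "date" is one substitution
# over four reference characters, so the expected score is 1 / 4 = 0.25:
#
#     >>> cer = datasets.load_metric("cer")
#     >>> cer.compute(predictions=["date"], references=["data"])
#     0.25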
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def __UpperCAmelCase ( self ) -> int:
# with apply_OCR = True
UpperCAmelCase_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCAmelCase_ : Dict = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
UpperCAmelCase_ : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
UpperCAmelCase_ : List[Any] = image_processing(_UpperCamelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
UpperCAmelCase_ : List[Any] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 
3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCamelCase )
self.assertListEqual(encoding.boxes , _UpperCamelCase )
# with apply_OCR = False
UpperCAmelCase_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = image_processing(_UpperCamelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 406 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
# NOTE: the model-specific class name was lost in the dump; "ImageProcessor" below
# is a placeholder reconstruction. Attribute and parameter names are recovered
# from how they are read back later in this class.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, do_rescale=True, rescale_factor=1 / 2_5_5, crop_size=None, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
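# Hedged usage sketch -- the class name above is a reconstruction, and this
# instantiation is illustrative only (BaseImageProcessor.__call__ dispatches
# to preprocess()):
#   processor = ImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, crop_size["height"], crop_size["width"])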
| 406 | 1 |
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value, from_type, to_type):
    """Convert an astronomical length between metric prefixes of the meter."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'from_type' value: {from_type!r}.\n"""
            f"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'to_type' value: {to_type!r}.\n"""
            f"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
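# Worked examples, derived from the exponent table above (hypothetical calls):
#   length_conversion(4, "meter", "kilometer")  # 4 * 10**(0 - 3) -> 0.004
#   length_conversion(1, "Mm", "km")            # 1 * 10**(6 - 3) -> 1000.0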
| 587 | from __future__ import annotations
import math
def minimax(depth, node_index, is_max, scores, height):
    """Return the optimal value for the current player in a perfect binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
    )
def main():
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores), 2)
    print(f"""Optimal value : {minimax(0, 0, True, scores, height)}""")
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
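# Worked check for the scores above (height = 3, root is the maximizer):
#   depth-2 maxes: max(90,23)=90, max(6,33)=33, max(21,65)=65, max(123,34423)=34423
#   depth-1 mins:  min(90,33)=33, min(65,34423)=65
#   root max:      max(33,65)=65  -> prints "Optimal value : 65"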
| 587 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''huggingface/time-series-transformer-tourism-monthly''': (
        '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = '''time_series_transformer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling="mean", num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", d_model=64, dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
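# Hedged example (values are illustrative, not from a released checkpoint):
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   config.context_length  # 24, since context_length defaults to prediction_length
#   config.feature_size    # input_size * len(lags_sequence) + _number_of_features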
| 657 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            '''feature-extraction''': OpenAIGPTModel,
            '''text-classification''': OpenAIGPTForSequenceClassification,
            '''text-generation''': OpenAIGPTLMHeadModel,
            '''zero-shot''': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 657 | 1 |
def topological_sort(graph):
    """Kahn's algorithm: repeatedly pop zero-indegree vertices from a queue."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
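# For the graph above the initial indegrees are [0, 1, 1, 2, 1, 1], so Kahn's
# algorithm dequeues 0, then 1 and 2, then 3, then 4 and 5: it prints
# [0, 1, 2, 3, 4, 5].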
| 693 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 27,
    'up': 65 + ARROW_KEY_FLAG,
    'down': 66 + ARROW_KEY_FLAG,
    'right': 67 + ARROW_KEY_FLAG,
    'left': 68 + ARROW_KEY_FLAG,
    'mod_int': 91,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 50,
    'delete': 51,
    'pg_up': 53,
    'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt
        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_2_6))
                    ch = chr(KEYMAP["""esc"""])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 693 | 1 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem by flocking this file"""
    with open(__file__, """r""") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
    dist.barrier()
    if rank == 0:
        printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
    printflock(f"""{gpu} is broken""")
    raise
| 155 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: KarrasVeScheduler
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=5_0, generator=None, output_type="pil", return_dict=True, **kwargs):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["""derivative"""], )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
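# Hedged usage sketch (illustrative only; the unet/scheduler pair must come
# from a VE-style checkpoint, which is an assumption here):
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]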
| 155 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 713 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pix2struct-textcaps-base': (
        'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
    ),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=5_0244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1E-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""") == "pix2struct":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1E-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1E-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""") == "pix2struct":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.0_2, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""")
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""")
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 59 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<s>""")
        self.assertEqual(vocab_keys[1], """<pad>""")
        self.assertEqual(len(vocab_keys), 10_08)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_08)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]], )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ], )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = """Hello World!"""
        original_tokenizer_encodings = [2, 3_12_27, 44_47, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
        )
        # fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="""facebook/xglm-564M""", padding=False, )
| 215 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''')
        text = '''The dog is cute and lives in the garden house'''
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 1_2, 7_6_8)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
        output = model(input_ids)['''last_hidden_state''']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 103 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BertTokenizer
UpperCamelCase = BertTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = filter_non_english
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : List[str] , A_ : str ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = 'unwanted, running'
return input_text, output_text
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# With lower casing
lowerCamelCase_ = self.get_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def a__ ( self : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
lowerCamelCase_ = 'a\n\'ll !!to?\'d of, can\'t.'
lowerCamelCase_ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase_ = {}
for i, token in enumerate(A_ ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : str ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def a__ ( self : str ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('bert-base-uncased' )
lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase_ = tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ['的', '人', '有']
lowerCamelCase_ = ''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ ) | 718 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
lowerCamelCase : str = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE ( lowercase : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
    def count_nodes(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowercase ) != count_coins(lowercase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
    def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        coins_distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_distrib_moves , coins_distrib_excess )
return get_distrib(lowercase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
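    # Hedged usage sketch (added; not in the original module): a three-node
    # tree whose root holds all 3 coins needs exactly two moves to give each
    # node one coin.
    _example = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
    assert _SCREAMING_SNAKE_CASE(_example ) == 2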
| 651 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
lowerCAmelCase : Dict = TypeVar('T')
class DisjointSetTreeNode(Generic[T]):
    def __init__( self , data: T )-> None:
        '''simple docstring'''
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    def __init__( self )-> None:
        '''simple docstring'''
        self.map = {}
    def make_set( self , data: T )-> None:
        '''simple docstring'''
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self , data: T )-> DisjointSetTreeNode[T]:
        '''simple docstring'''
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self , node_a , node_b )-> None:
        '''simple docstring'''
        if node_a.rank > node_b.rank:
            node_b.parent = node_a
        else:
            node_a.parent = node_b
            if node_a.rank == node_b.rank:
                node_b.rank += 1
    def union( self , data_a: T , data_b: T )-> None:
        '''simple docstring'''
        self.link(self.find_set(data_a ) , self.find_set(data_b ) )
class GraphUndirectedWeighted(Generic[T]):
    def __init__( self )-> None:
        '''simple docstring'''
        self.connections = {}
    def add_node( self , node: T )-> None:
        '''simple docstring'''
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self , node_a: T , node_b: T , weight: int )-> None:
        '''simple docstring'''
        self.add_node(node_a )
        self.add_node(node_b )
        self.connections[node_a][node_b] = weight
        self.connections[node_b][node_a] = weight
    def kruskal( self )-> GraphUndirectedWeighted[T]:
        '''simple docstring'''
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u , v , w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
return graph
| 3 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__)
def make_batched( videos ):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F"Could not make batched video from {videos}" )
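# Hedged illustration (added; not in the original module): make_batched
# normalizes the three accepted input shapes to one nested clips layout:
#   make_batched(img)              -> [[img]]        (single frame)
#   make_batched([img1, img2])     -> [[img1, img2]] (one clip)
#   make_batched([[img1], [img2]]) -> unchanged      (already batched clips)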
class snake_case__ ( snake_case_ ):
_snake_case : List[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 256}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = resample
__a = do_rescale
__a = rescale_factor
__a = offset
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" in size:
__a = get_resize_output_image_size(lowerCamelCase , size["shortest_edge"] , default_to_square=lowerCamelCase )
elif "height" in size and "width" in size:
__a = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , **lowerCamelCase , ):
__a = image.astype(np.floataa )
if offset:
__a = image - (scale / 2)
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
__a = to_numpy_array(lowerCamelCase )
if do_resize:
__a = self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase )
if do_center_crop:
__a = self.center_crop(lowerCamelCase , size=lowerCamelCase )
if do_rescale:
__a = self.rescale(image=lowerCamelCase , scale=lowerCamelCase , offset=lowerCamelCase )
if do_normalize:
__a = self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase )
__a = to_channel_dimension_format(lowerCamelCase , lowerCamelCase )
return image
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = offset if offset is not None else self.offset
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
__a = make_batched(lowerCamelCase )
__a = [
[
self._preprocess_image(
image=lowerCamelCase , do_resize=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , do_center_crop=lowerCamelCase , crop_size=lowerCamelCase , do_rescale=lowerCamelCase , rescale_factor=lowerCamelCase , offset=lowerCamelCase , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , data_format=lowerCamelCase , )
for img in video
]
for video in videos
]
__a = {"pixel_values": videos}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 528 | 0 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
_lowerCAmelCase :int = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys(s_dict ):
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(R'layers_(\d+)' , R'block/\1/layer' , new_key )
        layer_to_block_of_layer = R'(encoder|decoder)\/'
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'/mlp/' , R'/1/mlp/' , new_key )
                new_key = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R'/mlp/' , R'/2/mlp/' , new_key )
                new_key = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace('expert/' , f"""experts/expert_{idx}/""" )[:-1]] = expert_weihts[idx]
                print(f"""{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )[:-1]}""" )
            s_dict.pop(key )
    return s_dict
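# Hedged illustration (added; not in the original script): rename_keys turns a
# T5X parameter path such as
#   "encoder/layers_0/attention/query/kernel"
# into the HF-style
#   "encoder/block/0/layer/0/SelfAttention/q/kernel"
# via the layers_(\d+) regex plus the MOE_LAYER_NAME_MAPPING table above.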
_lowerCAmelCase :int = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config(gin_file , num_experts ):
    '''simple docstring'''
    import regex as re
    with open(gin_file , 'r' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R'(.*) = ([0-9.]*)' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '.' in value else int(value )
    activation = re.findall(R'(.*activations) = \(\'(.*)\',\)' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['num_experts'] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    '''simple docstring'''
    print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['target']
    flax_params = flatten_dict(flax_params , sep='/' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep='/' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
_lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
_lowerCAmelCase :Dict = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
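# Example invocation (hedged sketch; flag names as defined above):
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --gin_file /path/to/config.gin \
#       --pytorch_dump_folder_path /path/to/output \
#       --num_experts 8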
| 179 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
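# Hedged note (added): with this _LazyModule pattern, `from ... import AlbertModel`
# resolves the heavy torch-backed submodule only on first attribute access; the
# TYPE_CHECKING branch above gives static analyzers the same names eagerly.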
| 179 | 1 |
"""simple docstring"""
from math import factorial
def solution(num: int = 100 ) -> int:
    '''simple docstring'''
    return sum(map(int , str(factorial(num ) ) ) )
if __name__ == "__main__":
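    # Hedged check (added): 10! = 3628800, whose digits sum to 27.
    assert solution(10) == 27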
print(solution(int(input('''Enter the Number: ''').strip()))) | 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Any = outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
_lowerCamelCase : Union[str, Any] = after_outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase ,1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) ) | 46 | 1 |
'''simple docstring'''
def solution(max_perimeter: int = 1_0**9 ) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
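    # Hedged check (added): the smallest almost-equilateral triangle with an
    # integral area is (5, 5, 6), so summing perimeters up to 16 yields 16.
    assert solution(16) == 16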
print(F'''{solution() = }''')
| 713 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class lowerCamelCase :
snake_case_ = field(
metadata={"help": "The csv file to plot."} , )
snake_case_ = field(
default=_A , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
snake_case_ = field(
default=_A , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
snake_case_ = field(
default=_A , metadata={"help": "Disable logarithmic scale when plotting"} , )
snake_case_ = field(
default=_A , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
snake_case_ = field(
default=_A , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
snake_case_ = list_field(
default=_A , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int(string ):
    try:
        int(string )
        return True
    except ValueError:
        return False
def can_convert_to_float(string ):
    try:
        float(string )
        return True
    except ValueError:
        return False
class Plot:
    def __init__( self , args ):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline="" ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) )
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) )
                if can_convert_to_int(row["result"] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"] ), int(row["sequence_length"] ))
                    ] = int(row["result"] )
                elif can_convert_to_float(row["result"] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"] ), int(row["sequence_length"] ))
                    ] = float(row["result"] )
    def plot( self ):
        fig , ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log" )
            ax.set_yscale("log" )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"] ) )
            results = self.result_dict[model_name]["result"]
            x_axis_array , inner_loop_array = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                x_axis_label , inner_loop_label = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array , y_axis_array , "--" )
                title_str += F''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
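    # Example invocation (hedged sketch; flag names come from PlotArguments above):
    #   python plot_csv_file.py --csv_file results.csv --figure_png_file usage.png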
main()
| 551 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
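# Example invocation (hedged sketch; positional and optional flags as defined above):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --out-file eval.json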
def make_qid_to_has_ans(dataset ):
    '''simple docstring'''
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"] )
    return qid_to_has_ans
def normalize_answer(s ):
    '''simple docstring'''
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(" " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens(s ):
    '''simple docstring'''
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa(a_gold , a_pred ):
    '''simple docstring'''
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
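# Hedged illustration (added): for gold "the cat sat" and prediction
# "cat sat down", normalization drops the article, the token overlap is
# {"cat", "sat"}, so precision = 2/3, recall = 1, and F1 = 0.8.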
def get_raw_scores(dataset , preds ):
    '''simple docstring'''
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(F"""Missing prediction for {qid}""" )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    '''simple docstring'''
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
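# Hedged illustration (added): with na_prob_thresh = 0.5, a question whose
# predicted no-answer probability is 0.9 is scored as an abstention — it earns
# 1.0 only if the gold label is indeed unanswerable, 0.0 otherwise.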
def make_eval_dict(exact_scores , fa_scores , qid_list=None ):
    '''simple docstring'''
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values() ) / total),
                ("f1", 100.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ] )
def merge_eval(main_eval , new_eval , prefix ):
    '''simple docstring'''
    for k in new_eval:
        main_eval[F"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve(precisions , recalls , out_image , title ):
    '''simple docstring'''
    plt.step(recalls , precisions , color="b" , alpha=0.2 , where="post" )
    plt.fill_between(recalls , precisions , step="post" , alpha=0.2 , color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
    '''simple docstring'''
    qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , out_image_dir ):
    '''simple docstring'''
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(main_eval , pr_exact , "pr_exact" )
    merge_eval(main_eval , pr_fa , "pr_f1" )
    merge_eval(main_eval , pr_oracle , "pr_oracle" )
def histogram_na_prob(na_probs , qid_list , image_dir , name ):
    '''simple docstring'''
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(F"""Histogram of no-answer probability: {name}""" )
    plt.savefig(os.path.join(image_dir , F"""na_prob_hist_{name}.png""" ) )
    plt.clf()
def _UpperCAmelCase ( A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase__ =num_no_ans
UpperCAmelCase__ =cur_score
UpperCAmelCase__ =0.0
UpperCAmelCase__ =sorted(__UpperCamelCase , key=lambda A : na_probs[k] )
for i, qid in enumerate(__UpperCamelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase__ =scores[qid]
else:
if preds[qid]:
UpperCAmelCase__ =-1
else:
UpperCAmelCase__ =0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase__ =cur_score
UpperCAmelCase__ =na_probs[qid]
return 1_00.0 * best_score / len(__UpperCamelCase ), best_thresh
def _UpperCAmelCase ( A , A , A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ =find_best_thresh(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ =find_best_thresh(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ =best_exact
UpperCAmelCase__ =exact_thresh
UpperCAmelCase__ =best_fa
UpperCAmelCase__ =fa_thresh
def _UpperCAmelCase ( ):
'''simple docstring'''
with open(OPTS.data_file ) as f:
UpperCAmelCase__ =json.load(__UpperCamelCase )
UpperCAmelCase__ =dataset_json["data"]
with open(OPTS.pred_file ) as f:
UpperCAmelCase__ =json.load(__UpperCamelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCAmelCase__ =json.load(__UpperCamelCase )
else:
UpperCAmelCase__ ={k: 0.0 for k in preds}
UpperCAmelCase__ =make_qid_to_has_ans(__UpperCamelCase ) # maps qid to True/False
UpperCAmelCase__ =[k for k, v in qid_to_has_ans.items() if v]
UpperCAmelCase__ =[k for k, v in qid_to_has_ans.items() if not v]
UpperCAmelCase__ , UpperCAmelCase__ =get_raw_scores(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ =apply_no_ans_threshold(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , OPTS.na_prob_thresh )
UpperCAmelCase__ =apply_no_ans_threshold(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , OPTS.na_prob_thresh )
UpperCAmelCase__ =make_eval_dict(__UpperCamelCase , __UpperCamelCase )
if has_ans_qids:
UpperCAmelCase__ =make_eval_dict(__UpperCamelCase , __UpperCamelCase , qid_list=__UpperCamelCase )
merge_eval(__UpperCamelCase , __UpperCamelCase , "HasAns" )
if no_ans_qids:
UpperCAmelCase__ =make_eval_dict(__UpperCamelCase , __UpperCamelCase , qid_list=__UpperCamelCase )
merge_eval(__UpperCamelCase , __UpperCamelCase , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , OPTS.out_image_dir )
histogram_na_prob(__UpperCamelCase , __UpperCamelCase , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(__UpperCamelCase , __UpperCamelCase , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
else:
print(json.dumps(__UpperCamelCase , indent=2 ) )
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
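
# Usage sketch -- the flag names below come from parse_args() defined earlier in
# this script, and the file names are placeholders, so treat this as an
# illustration rather than a verified command line:
#   python evaluate_squad.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --out-file eval.json --out-image-dir plots/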
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
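
# Quick self-check (illustrative input, not part of the original script):
#   >>> gnome_sort([5, 3, 1, 4, 2])
#   [1, 2, 3, 4, 5]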
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    result = model.predict(x_test)
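
    # Possible follow-up (an assumption, not part of the original script): keep the
    # fitted scaler instead of calling fit_transform inline, so the scaled
    # predictions can be mapped back to real prices:
    #   scaler = MinMaxScaler().fit(raw_prices)
    #   actual_data = scaler.transform(raw_prices)
    #   ...
    #   predicted = scaler.inverse_transform(result.reshape(-1, 1))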
#!/usr/bin/env python3
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})


@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )


@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels

        return batch


class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48000, 16000)

    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
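
# Usage sketch (illustrative flags only: --model_name_or_path and
# --dataset_config_name are defined in the dataclasses above, the rest come from
# transformers' TrainingArguments):
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr --output_dir ./wav2vec2-tr \
#       --do_train --do_eval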
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , ) -> tuple[str, float]:
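    """
    Compute whichever of (stress, tangential force, area) is given as zero from
    the other two. Doctest examples below are illustrative values verified
    against the formulas in this function:

    >>> shear_stress(stress=25, tangential_force=100, area=0)
    ('area', 4.0)
    >>> shear_stress(stress=0, tangential_force=1600, area=200)
    ('stress', 8.0)
    >>> shear_stress(stress=1000, tangential_force=0, area=1200)
    ('tangential_force', 1200000)
    """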
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 430 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class lowerCAmelCase__ :
__a = 42
__a = None
# Automatically constructed
__a = "dict"
__a = None
__a = field(default="""Translation""" , init=A_ , repr=A_ )
def __call__( self : Optional[Any] ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase ( self : Any ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class lowerCAmelCase__ :
__a = None
__a = None
__a = None
# Automatically constructed
__a = "dict"
__a = None
__a = field(default="""TranslationVariableLanguages""" , init=A_ , repr=A_ )
def lowercase ( self : str ):
_snake_case = sorted(set(self.languages ) ) if self.languages else None
_snake_case = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def lowercase ( self : Tuple , _lowerCamelCase : List[Any] ):
_snake_case = set(self.languages )
if self.languages and set(_lowerCamelCase ) - lang_set:
raise ValueError(
f'''Some languages in example ({', '.join(sorted(set(_lowerCamelCase ) - lang_set ) )}) are not in valid set ({', '.join(_lowerCamelCase )}).''' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_snake_case = []
for lang, text in translation_dict.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_snake_case , _snake_case = zip(*sorted(_lowerCamelCase ) )
return {"language": languages, "translation": translations}
def lowercase ( self : List[Any] ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
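
# Minimal usage sketch (hypothetical example values; relies only on the classes above):
#   feature = TranslationVariableLanguages(languages=["de", "en"])
#   feature.encode_example({"de": "Hallo", "en": ["Hello", "Hi"]})
#   -> {"language": ("de", "en", "en"), "translation": ("Hallo", "Hello", "Hi")}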
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
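
# Usage sketch (follows the standard PretrainedConfig pattern):
#   config = MvpConfig()                                   # the defaults above
#   small = MvpConfig(encoder_layers=6, decoder_layers=6)  # illustrative override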
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
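
# Illustrative checks: solution(13195) == 29 (13195 = 5 * 7 * 13 * 29) and
# solution(17) == 17, since 17 is itself prime.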
def factorial(num: int) -> int:
    """Return num! (the factorial of num)."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of number."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler problem 20: the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
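
# Illustrative check: solution(10) == 27, because 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.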
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with the size of each set and rank 1 for every set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
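
# Quick usage sketch (illustrative values, not from the source): three singleton
# sets of sizes [1, 1, 1]; merging 0 and 1 grows the largest set to 2.
#   ds = DisjointSet([1, 1, 1])
#   ds.merge(0, 1)  # -> True
#   ds.max_set      # -> 2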
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
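
# To run only this test file (an assumption: a transformers source checkout with
# pytest installed, and the conventional location for this module):
#   python -m pytest tests/models/mobilebert/test_modeling_tf_mobilebert.py -q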
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
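
# Typical use (this mirrors the standard transformers auto-class API):
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")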
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , ) ->Optional[int]:
UpperCAmelCase = bnb_quantization_config.load_in_abit
UpperCAmelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
UpperCAmelCase = []
# custom device map
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(device_map.keys() ) > 1:
UpperCAmelCase = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCAmelCase = get_keys_to_not_convert(lowerCAmelCase_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowerCAmelCase_ )
UpperCAmelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCAmelCase = []
UpperCAmelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowerCAmelCase_ )
# compatibility with peft
UpperCAmelCase = load_in_abit
UpperCAmelCase = load_in_abit
UpperCAmelCase = get_parameter_device(lowerCAmelCase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
UpperCAmelCase = replace_with_bnb_layers(lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
# convert param to the right dtype
UpperCAmelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCAmelCase = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
UpperCAmelCase = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowerCAmelCase_ ):
param.to(lowerCAmelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
UpperCAmelCase = replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
UpperCAmelCase = get_quantized_model_device_map(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , max_memory=lowerCAmelCase_ , no_split_module_classes=lowerCAmelCase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCAmelCase = True
UpperCAmelCase = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCAmelCase_ , offload_state_dict=lowerCAmelCase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowerCAmelCase_ , device_map=lowerCAmelCase_ , offload_dir=lowerCAmelCase_ )
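# --- Usage sketch (not part of the original file) ---
# A minimal sketch of the quantization entry point above, written against the
# upstream ``accelerate`` API that this file mirrors. Assumes a CUDA GPU and a
# working ``bitsandbytes`` install; the toy model is purely illustrative.
if __name__ == "__main__":
    import torch.nn as nn
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

    toy = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 8))
    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    quantized = load_and_quantize_model(toy, bnb_quantization_config=bnb_config)
    print(quantized)  # nn.Linear layers replaced by bnb 8-bit linear modules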
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) ->Tuple:
if device_map is None:
if torch.cuda.is_available():
UpperCAmelCase = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
UpperCAmelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCAmelCase = {}
UpperCAmelCase = special_dtypes
UpperCAmelCase = no_split_module_classes
UpperCAmelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCAmelCase = get_balanced_memory(
lowerCAmelCase_ , low_zero=(device_map == """balanced_low_0""") , max_memory=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase = max_memory
UpperCAmelCase = infer_auto_device_map(lowerCAmelCase_ , **lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
# check if don't have any quantized module on the cpu
UpperCAmelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCAmelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ) ->Optional[int]:
if modules_to_not_convert is None:
UpperCAmelCase = []
UpperCAmelCase , UpperCAmelCase = _replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) ->Optional[int]:
UpperCAmelCase = False
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase = []
current_key_name.append(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCAmelCase = """.""".join(lowerCAmelCase_ )
UpperCAmelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCAmelCase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCAmelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCAmelCase_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCAmelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
UpperCAmelCase = module.weight.data
if module.bias is not None:
UpperCAmelCase = module.bias.data
bnb_module.requires_grad_(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = True
if len(list(module.children() ) ) > 0:
UpperCAmelCase , UpperCAmelCase = _replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _UpperCamelCase ( lowerCAmelCase_ ) ->Optional[Any]:
# Create a copy of the model
with init_empty_weights():
        UpperCAmelCase = deepcopy(lowerCAmelCase_ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
UpperCAmelCase = find_tied_parameters(lowerCAmelCase_ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase = sum(lowerCAmelCase_ , [] )
UpperCAmelCase = len(lowerCAmelCase_ ) > 0
# Check if it is a base model
UpperCAmelCase = False
if hasattr(lowerCAmelCase_ , """base_model_prefix""" ):
UpperCAmelCase = not hasattr(lowerCAmelCase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase = list(model.named_children() )
UpperCAmelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ )
UpperCAmelCase = list(set(lowerCAmelCase_ ) ) + list(lowerCAmelCase_ )
# remove ".weight" from the keys
UpperCAmelCase = [""".weight""", """.bias"""]
UpperCAmelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase = name.replace(lowerCAmelCase_ , """""" )
filtered_module_names.append(lowerCAmelCase_ )
return filtered_module_names
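# --- Usage sketch (not part of the original file) ---
# The helper above flags output heads / tied weights that must stay in full
# precision. A toy tied-embedding model, using the upstream name
# ``get_keys_to_not_convert`` from ``accelerate.utils.bnb`` (an assumption
# about where the public copy of this helper lives):
if __name__ == "__main__":
    import torch.nn as nn
    from accelerate.utils.bnb import get_keys_to_not_convert

    class TinyLM(nn.Module):
        def __init__(self):
            super().__init__()
            self.embed = nn.Embedding(100, 16)
            self.head = nn.Linear(16, 100, bias=False)
            self.head.weight = self.embed.weight  # tie input/output embeddings

        def forward(self, x):
            return self.head(self.embed(x))

    print(get_keys_to_not_convert(TinyLM()))  # expected to include "head"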
def _UpperCamelCase ( lowerCAmelCase_ ) ->List[str]:
for m in model.modules():
if isinstance(lowerCAmelCase_ , bnb.nn.Linearabit ):
return True
return False
def _UpperCamelCase ( lowerCAmelCase_ ) ->str:
return next(parameter.parameters() ).device
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(lowerCAmelCase_ , lowerCAmelCase_ , 0 , dtype=lowerCAmelCase_ , value=lowerCAmelCase_ )
UpperCAmelCase = param_name
UpperCAmelCase = model
if "." in tensor_name:
UpperCAmelCase = tensor_name.split(""".""" )
for split in splits[:-1]:
UpperCAmelCase = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCAmelCase = new_module
UpperCAmelCase = splits[-1]
# offload weights
UpperCAmelCase = False
offload_weight(module._parameters[tensor_name] , lowerCAmelCase_ , lowerCAmelCase_ , index=lowerCAmelCase_ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , lowerCAmelCase_ , index=lowerCAmelCase_ , )
else:
offload_weight(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , index=lowerCAmelCase_ )
offload_weight(lowerCAmelCase_ , param_name.replace("""weight""" , """SCB""" ) , lowerCAmelCase_ , index=lowerCAmelCase_ )
set_module_tensor_to_device(lowerCAmelCase_ , lowerCAmelCase_ , """meta""" , dtype=lowerCAmelCase_ , value=torch.empty(*param.size() ) )
| 716 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__a = """sshleifer/student_marian_en_ro_6_1"""
__a = """sshleifer/tiny-mbart"""
@require_torch
class __lowercase ( __snake_case ):
def _lowercase ( self : Dict , __lowerCamelCase : List[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : Any=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=True , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
UpperCAmelCase = TrainerState.load_from_json(os.path.join(__lowerCamelCase , """trainer_state.json""" ) ).log_history
if not do_eval:
return
UpperCAmelCase = [log for log in logs if """eval_loss""" in log.keys()]
UpperCAmelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _lowercase ( self : Dict ) -> str:
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : Dict ) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__lowerCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
def _lowercase ( self : str ) -> Optional[Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def _lowercase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
UpperCAmelCase = experiments[experiment_id]
UpperCAmelCase = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
UpperCAmelCase = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["""extra_args_str"""] )
UpperCAmelCase = len(re.findall(__lowerCamelCase , cl.err ) )
self.assertEqual(__lowerCamelCase , data["""n_matches"""] )
@slow
def _lowercase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1_0 , distributed=__lowerCamelCase , )
# Check metrics
UpperCAmelCase = TrainerState.load_from_json(os.path.join(__lowerCamelCase , """trainer_state.json""" ) ).log_history
UpperCAmelCase = [log for log in logs if """eval_loss""" in log.keys()]
UpperCAmelCase = eval_metrics[0]
UpperCAmelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __lowerCamelCase )
# test if do_predict saves generations and metrics
UpperCAmelCase = os.listdir(__lowerCamelCase )
UpperCAmelCase = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _lowercase ( self : str ) -> int:
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]:
UpperCAmelCase = """--skip_memory_metrics 0"""
UpperCAmelCase = self.run_trainer(
max_len=1_2_8 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase = TrainerState.load_from_json(Path(__lowerCamelCase , """trainer_state.json""" ) ).log_history
UpperCAmelCase = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**2_0 )
UpperCAmelCase = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**2_0 )
UpperCAmelCase = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
UpperCAmelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
UpperCAmelCase = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def _lowercase ( self : Any , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
UpperCAmelCase = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCamelCase )}
""".split()
UpperCAmelCase = """
--do_predict
""".split()
UpperCAmelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase = get_gpu_count()
UpperCAmelCase = get_torch_dist_unique_port()
UpperCAmelCase = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
UpperCAmelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
else:
UpperCAmelCase = ["""run_translation.py"""] + args
with patch.object(__lowerCamelCase , """argv""" , __lowerCamelCase ):
main()
return output_dir
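# --- Usage note (not part of the original file) ---
# These extended trainer tests are normally driven by pytest from a
# transformers checkout; the flags and path below are illustrative:
#
#     RUN_SLOW=1 pytest tests/extended/test_trainer_ext.py -s
#
# The distributed cases need multiple CUDA GPUs, and the optimizer-memory
# case additionally needs a working `bitsandbytes` install.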
| 627 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    a['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    a['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], a, module_spec=__spec__)
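# --- Usage sketch (not part of the original file) ---
# With the lazy module wired up, the heavy submodules import only on first
# attribute access; a minimal check (assumes transformers with torch installed):
if __name__ == "__main__":
    from transformers import BridgeTowerConfig

    print(BridgeTowerConfig().model_type)  # bridgetower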
| 412 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Evaluate at x0 the polynomial interpolating the given points, using
    Neville's iterated-linear-interpolation scheme."""
    n = len(x_points)
    # q[j][i] holds the value at x0 of the interpolant built from i+1 points
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
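# --- Usage sketch (not part of the original file) ---
# Five collinear points on y = x + 5; Neville's scheme is exact here, so
# evaluating the interpolant at x0 = 5 should give 10.0:
if __name__ == "__main__":
    value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    print(value)  # 10.0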
| 412 | 1 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''Minimal stand-in used only when Pillow is unavailable.'''

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """Short md5 fingerprint of an image's raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    """Reduce a mask image to a compact, comparable summary."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _a ( unittest.TestCase ):
'''simple docstring'''
A : str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
A : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = MaskGenerationPipeline(model=A, image_processor=A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = pipeline('mask-generation', model='facebook/sam-vit-huge' )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
], )
# fmt: on
@require_torch
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'facebook/sam-vit-huge'
SCREAMING_SNAKE_CASE : int = pipeline('mask-generation', model=A )
SCREAMING_SNAKE_CASE : Optional[Any] = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg', pred_iou_thresh=1, points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
], )
| 508 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''pixel_values''']
def __init__( self, A = True, A = None, A = PILImageResampling.BICUBIC, A = True, A = True, A = 1 / 255, A = None, A = True, A = None, A = None, **A, ):
'''simple docstring'''
super().__init__(**A )
SCREAMING_SNAKE_CASE : Any = size if size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(A )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : str = get_size_dict(A, default_to_square=A, param_name='crop_size' )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize
SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size
SCREAMING_SNAKE_CASE : Union[str, Any] = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self, A, A, A = PILImageResampling.BILINEAR, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(A )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(A, size=size['shortest_edge'], default_to_square=A )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE : Union[str, Any] = (size['height'], size['width'])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(A, size=A, resample=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(A, size=(size['height'], size['width']), data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A ):
'''simple docstring'''
return rescale(A, scale=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A, A = None, **A, ):
'''simple docstring'''
return normalize(A, mean=A, std=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = ChannelDimension.FIRST, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(A, param_name='crop_size', default_to_square=A )
SCREAMING_SNAKE_CASE : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(A )
if not is_batched(A ):
SCREAMING_SNAKE_CASE : List[Any] = [images]
if not valid_images(A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[int] = [to_numpy_array(A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.resize(image=A, size=A, resample=A ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : List[Any] = [self.center_crop(image=A, size=A ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.rescale(image=A, scale=A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=A, mean=A, std=A ) for image in images]
SCREAMING_SNAKE_CASE : List[Any] = [to_channel_dimension_format(A, A ) for image in images]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=A, tensor_type=A )
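# --- Usage sketch (not part of the original file) ---
# Image processors of this shape share one call pattern across transformers;
# an illustrative sketch (the concrete class name is a placeholder, since the
# identifiers in this file are generic):
#
#     import numpy as np
#
#     image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
#     processor = SomeImageProcessor()           # hypothetical concrete class
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)         # (1, 3, 224, 224) with defaults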
| 508 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _snake_case ( A__ ):
_lowercase : Optional[int] = '''levit'''
def __init__( self , a=224 , a=3 , a=3 , a=2 , a=1 , a=16 , a=[128, 256, 384] , a=[4, 8, 12] , a=[4, 4, 4] , a=[16, 16, 16] , a=0 , a=[2, 2, 2] , a=[2, 2, 2] , a=0.02 , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = kernel_size
SCREAMING_SNAKE_CASE = stride
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _snake_case ( A__ ):
_lowercase : int = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> float:
return 1E-4
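# --- Usage sketch (not part of the original file) ---
# The upstream counterpart of the class above is ``LevitConfig``; default
# construction plus a field override (assumes transformers installed):
if __name__ == "__main__":
    from transformers import LevitConfig

    config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
    print(config.model_type)  # levit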
| 73 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
if __name__ == "__main__":
main()
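# --- Companion sketch (not part of the original file) ---
# The client above expects a peer on port 12312 that consumes the greeting and
# then streams a file. A minimal, hypothetical server satisfying that protocol
# ('File_to_send' is a placeholder filename):
def serve(filename: str = 'File_to_send') -> None:
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((socket.gethostname(), 12312))
    srv.listen(1)
    conn, _addr = srv.accept()
    conn.recv(1024)  # consume the b'Hello server!' greeting
    with open(filename, 'rb') as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    srv.close()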
| 128 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
A__: Dict = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _a ( _lowercase):
"""simple docstring"""
UpperCamelCase__ = '''conditional_detr'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self: Optional[int] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: List[str]=3 , __lowerCamelCase: str=300 , __lowerCamelCase: List[Any]=6 , __lowerCamelCase: List[str]=2048 , __lowerCamelCase: int=8 , __lowerCamelCase: str=6 , __lowerCamelCase: str=2048 , __lowerCamelCase: Union[str, Any]=8 , __lowerCamelCase: str=0.0 , __lowerCamelCase: List[str]=0.0 , __lowerCamelCase: Tuple=True , __lowerCamelCase: int="relu" , __lowerCamelCase: int=256 , __lowerCamelCase: Any=0.1 , __lowerCamelCase: List[str]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: int=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[Any]=False , __lowerCamelCase: List[Any]="sine" , __lowerCamelCase: Optional[Any]="resnet50" , __lowerCamelCase: Any=True , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: List[str]=2 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: Tuple=2 , __lowerCamelCase: int=1 , __lowerCamelCase: Optional[int]=1 , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Any=2 , __lowerCamelCase: Any=0.25 , **__lowerCamelCase: Any , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCamelCase__: List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(A_ , A_ ):
UpperCamelCase__: Tuple = backbone_config.get("model_type" )
UpperCamelCase__: str = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__: List[str] = config_class.from_dict(A_ )
UpperCamelCase__: Union[str, Any] = use_timm_backbone
UpperCamelCase__: Tuple = backbone_config
UpperCamelCase__: List[Any] = num_channels
UpperCamelCase__: Optional[int] = num_queries
UpperCamelCase__: Optional[Any] = d_model
UpperCamelCase__: str = encoder_ffn_dim
UpperCamelCase__: str = encoder_layers
UpperCamelCase__: List[Any] = encoder_attention_heads
UpperCamelCase__: Any = decoder_ffn_dim
UpperCamelCase__: str = decoder_layers
UpperCamelCase__: Tuple = decoder_attention_heads
UpperCamelCase__: Dict = dropout
UpperCamelCase__: str = attention_dropout
UpperCamelCase__: Dict = activation_dropout
UpperCamelCase__: Union[str, Any] = activation_function
UpperCamelCase__: Optional[int] = init_std
UpperCamelCase__: Optional[int] = init_xavier_std
UpperCamelCase__: Tuple = encoder_layerdrop
UpperCamelCase__: List[str] = decoder_layerdrop
UpperCamelCase__: Optional[int] = encoder_layers
UpperCamelCase__: List[str] = auxiliary_loss
UpperCamelCase__: Optional[int] = position_embedding_type
UpperCamelCase__: Optional[int] = backbone
UpperCamelCase__: Optional[Any] = use_pretrained_backbone
UpperCamelCase__: Dict = dilation
# Hungarian matcher
UpperCamelCase__: str = class_cost
UpperCamelCase__: Tuple = bbox_cost
UpperCamelCase__: List[str] = giou_cost
# Loss coefficients
UpperCamelCase__: int = mask_loss_coefficient
UpperCamelCase__: List[Any] = dice_loss_coefficient
UpperCamelCase__: Tuple = cls_loss_coefficient
UpperCamelCase__: Union[str, Any] = bbox_loss_coefficient
UpperCamelCase__: Union[str, Any] = giou_loss_coefficient
UpperCamelCase__: Tuple = focal_alpha
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase__: List[Any] = self.backbone_config.to_dict()
UpperCamelCase__: int = self.__class__.model_type
return output
class _a ( _lowercase):
"""simple docstring"""
UpperCamelCase__ = version.parse("""1.11""")
@property
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
return 12
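# --- Usage sketch (not part of the original file) ---
# The upstream counterpart of the class above is ``ConditionalDetrConfig``;
# its attribute map exposes encoder_attention_heads / d_model under the
# generic names (assumes transformers installed):
if __name__ == "__main__":
    from transformers import ConditionalDetrConfig

    config = ConditionalDetrConfig(num_queries=300)
    print(config.model_type, config.num_attention_heads, config.hidden_size)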
| 700 |
def solution(length: int = 50) -> int:
    """Count the tilings of a row of the given length using grey unit squares
    and coloured tiles of length two, three or four (cf. Project Euler 117)."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(f"{solution() = }")
| 221 | 0 |
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative ints as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
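# --- Usage sketch (not part of the original file) ---
# 25 is 0b11001 and 32 is 0b100000, so their OR is 0b111001 (decimal 57):
if __name__ == "__main__":
    print(binary_or(25, 32))  # 0b111001
    assert int(binary_or(25, 32), 2) == 25 | 32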
| 130 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaImgaImgPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCAmelCase__ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase ( self ):
'''simple docstring'''
return 3_2
@property
def _lowercase ( self ):
'''simple docstring'''
return 3_2
@property
def _lowercase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowercase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowercase ( self ):
'''simple docstring'''
return 1_0_0
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase = UNetaDConditionModel(**_A )
return model
@property
def _lowercase ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.dummy_unet
UpperCAmelCase = self.dummy_movq
UpperCAmelCase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
UpperCAmelCase = DDIMScheduler(**_A )
UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase ( self , _A , _A=0 ):
'''simple docstring'''
UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_A )
# create init_image
UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(_A )
else:
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = '''cpu'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
UpperCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase = output.images
UpperCAmelCase = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase = '''A red cartoon frog, 4k'''
UpperCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
UpperCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
UpperCAmelCase = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase = pipeline(
image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
UpperCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
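# --- Usage note (not part of the original file) ---
# The slow integration test above corresponds to this real diffusers usage
# (needs a CUDA GPU and network access; ``init_image`` is a placeholder PIL
# image you supply):
#
#     from diffusers import KandinskyV22PriorPipeline, KandinskyV22Img2ImgPipeline
#
#     prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#     pipe = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#     image_embeds, negative_embeds = prior("A red cartoon frog, 4k").to_tuple()
#     out = pipe(image=init_image, image_embeds=image_embeds,
#                negative_image_embeds=negative_embeds, strength=0.2).images[0]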
| 130 | 1 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowerCamelCase : str = TypeVar("T")
def get_parent_position(position: int) -> int:
    """Heap index of the parent of the node at the given position."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Heap index of the left child of the node at the given position."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Heap index of the right child of the node at the given position."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Min-heap keyed by weight, with a position map for fast update_key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Adjacency-map representation of an undirected weighted graph."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def UpperCAmelCase_ ( lowerCAmelCase_ , ):
"""simple docstring"""
lowercase = {node: maxsize for node in graph.connections}
lowercase = {node: None for node in graph.connections}
lowercase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase_ , lowerCAmelCase_ )
if priority_queue.is_empty():
return dist, parent
# initialization
lowercase = priority_queue.extract_min()
lowercase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase_ , dist[neighbour] )
lowercase = node
# running prim's algorithm
while not priority_queue.is_empty():
lowercase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase_ , dist[neighbour] )
lowercase = node
return dist, parent
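

if __name__ == "__main__":
    # Usage sketch (names follow the cleaned-up definitions above): a small
    # weighted graph whose minimum spanning tree is {b-c (2), a-b (3), c-d (4)}.
    graph = GraphUndirectedWeighted()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 2)
    graph.add_edge("a", "c", 6)
    graph.add_edge("c", "d", 4)
    dist, parent = prims_algo(graph)
    # `parent` encodes the MST: each node maps to the node that attached it
    # to the tree (the start node maps to None).
    for node, prev in parent.items():
        if prev is not None:
            print(f"MST edge: {prev} - {node} (weight {graph.connections[prev][node]})")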
| 459 |
"""Minimum cost to travel on all given days, choosing between 1-day, 7-day and 30-day passes."""
import functools


def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
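
    # Worked example (illustrative values): a 1-day pass for day 1, a 7-day
    # pass covering days 4-8, and a 1-day pass for day 20 gives the optimum
    # of 2 + 7 + 2 = 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # -> 11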
| 459 | 1 |
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)"""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` chars."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch       : {torch.__version__}\ncuda        : {torch.version.cuda}\npython      : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
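
# Example invocation of this benchmark driver (illustrative script path and
# arguments, not taken from this file):
#
#   CUDA_VISIBLE_DEVICES=0 python trainer-benchmark.py \
#       --base-cmd 'examples/pytorch/translation/run_translation.py \
#           --model_name_or_path t5-small --output_dir output_dir \
#           --do_train --max_train_samples 500' \
#       --target-metric-key train_samples_per_second \
#       --repeat-times 2 \
#       --variations '|--fp16|--bf16' \
#       --report-metric-keys train_loss
#
# Each entry in --variations is a '|'-separated dimension; the script runs the
# cartesian product of all dimensions (here: baseline, --fp16, --bf16) and
# prints a markdown table with the % diff against the baseline variation.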
| 354 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 160 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # the same three-way tie, repeated for each of the 5 images
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # the same three-way tie, repeated for each of the 5 images
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 454 |
"""Calculate pi to the requested precision with the Chudnovsky algorithm."""
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # the last digit may be off due to rounding at the working precision, so drop it
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 454 | 1 |
"""Convert a T5X checkpoint into a PyTorch checkpoint."""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict with torch tensors for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 69 |
"""simple docstring"""
def lowercase__ ( snake_case_ :Dict ): # noqa: E741
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = [0] * n
__UpperCAmelCase = [False] * n
__UpperCAmelCase = [False] * n
def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ):
if parent == root:
out_edge_count += 1
__UpperCAmelCase = True
__UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
__UpperCAmelCase = True
else:
__UpperCAmelCase = min(low[at] , snake_case_ )
return out_edge_count
for i in range(snake_case_ ):
if not visited[i]:
__UpperCAmelCase = 0
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ )
__UpperCAmelCase = out_edge_count > 1
for x in range(len(snake_case_ ) ):
if is_art[x] is True:
print(snake_case_ )
# Adjacency list of graph
_lowercase : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
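
# For the adjacency list above this prints the articulation points 2, 3 and 5:
# removing vertex 2 separates {0, 1} from the rest, removing 3 isolates
# vertex 4, and removing 5 cuts off the cycle {6, 7, 8}.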
| 49 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 206 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 206 | 1 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()` so only the main process writes."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that will temporarily add the given environment variables and remove them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Gets a pretty name from `obj`, falling back from __qualname__ to __name__ to str()."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merges two dictionaries, writing the result into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Checks whether a port is in use on `localhost`."""
    if port is None:
        port = 2_9500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
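

if __name__ == "__main__":
    # Quick illustration of two of the helpers above (a sketch; the names
    # follow the cleaned-up definitions in this file).
    # patch_environment sets env vars only for the duration of the block:
    with patch_environment(cuda_visible_devices="0,1"):
        print(os.environ["CUDA_VISIBLE_DEVICES"])  # "0,1"
    # restored afterwards (assuming the variable was not set beforehand)
    print("CUDA_VISIBLE_DEVICES" in os.environ)  # False

    # get_pretty_name works on classes and instances alike:
    print(get_pretty_name(dict))      # "dict" (via __qualname__)
    print(get_pretty_name({"a": 1}))  # "dict" (via the instance's class)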
| 197 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
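

if __name__ == "__main__":
    # Usage sketch: CANINE is tokenizer-free, so the config has no vocab_size;
    # raw unicode code points are hashed into `num_hash_buckets` buckets.
    config = CanineConfig(num_hidden_layers=6)  # any default can be overridden
    print(config.model_type)        # "canine"
    print(config.num_hash_buckets)  # 16384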
| 197 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
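

if __name__ == "__main__":
    # Standalone illustration of the `global_attention_mask` padding rule in
    # `_pad` above (no tokenizer needed; the token ids are made up): 1 means
    # global attention, 0 means local attention, and padded positions get -1
    # so they are neither.
    encoded = {"input_ids": [0, 713, 16, 10, 3645, 2], "global_attention_mask": [1, 0, 0, 0, 0, 0]}
    padded_len = 8  # pretend the batch was padded to length 8
    difference = padded_len - len(encoded["global_attention_mask"])
    encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
    print(encoded["global_attention_mask"])  # [1, 0, 0, 0, 0, 0, -1, -1]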
| 446 |
from math import ceil


def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals in an n by n spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ (__A ):
__magic_name__ = '''vision-encoder-decoder'''
__magic_name__ = True
def __init__( self : Union[str, Any] , **lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"""A configuraton of type {self.model_type} cannot be instantiated because """
f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
UpperCAmelCase_ : str = kwargs.pop("encoder" )
UpperCAmelCase_ : Dict = encoder_config.pop("model_type" )
UpperCAmelCase_ : List[Any] = kwargs.pop("decoder" )
UpperCAmelCase_ : Dict = decoder_config.pop("model_type" )
UpperCAmelCase_ : Union[str, Any] = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = True
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Dict ) -> PretrainedConfig:
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : List[str] = self.encoder.to_dict()
UpperCAmelCase_ : List[Any] = self.decoder.to_dict()
UpperCAmelCase_ : Any = self.__class__.model_type
return output
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class UpperCamelCase_ (__A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : List[Any] = OrderedDict()
UpperCAmelCase_ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
UpperCAmelCase_ : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : "PreTrainedTokenizerBase" , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
import torch
UpperCAmelCase_ : Optional[int] = OrderedDict()
UpperCAmelCase_ : Any = super().generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dummy_input["input_ids"].shape
UpperCAmelCase_ : List[str] = (batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCAmelCase_ : List[str] = dummy_input.pop("input_ids" )
UpperCAmelCase_ : List[str] = dummy_input.pop("attention_mask" )
UpperCAmelCase_ : List[Any] = torch.zeros(lowerCAmelCase_ )
return common_inputs
class UpperCamelCase_ (__A ):
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
pass
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : str = "default" ) -> OnnxConfig:
UpperCAmelCase_ : Optional[int] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(lowerCAmelCase_ , lowerCAmelCase_ )
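# Minimal usage sketch (hedged: the model choices are arbitrary -- any vision encoder /
# text decoder pair known to AutoConfig works):
if __name__ == "__main__":
    from transformers import BertConfig, ViTConfig

    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
    # the helper flips the decoder into cross-attention mode before composing
    assert config.decoder.is_decoder and config.decoder.add_cross_attention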
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ :
__magic_name__ = 42
__magic_name__ = None
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
raise NotImplementedError
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]:
raise NotImplementedError
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> str:
raise NotImplementedError
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
if not self.is_available():
raise RuntimeError(
f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ) -> List[str]:
return f"""`pip install {cls.pip_package or cls.name}`"""
class UpperCamelCase_ (__A ):
__magic_name__ = '''optuna'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
return is_optuna_available()
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Tuple ) -> int:
return run_hp_search_optuna(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
return default_hp_space_optuna(lowerCAmelCase_ )
class UpperCamelCase_ (__A ):
__magic_name__ = '''ray'''
__magic_name__ = '''\'ray[tune]\''''
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
return is_ray_available()
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Tuple ) -> List[str]:
return run_hp_search_ray(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Tuple ) -> Optional[int]:
return default_hp_space_ray(lowerCAmelCase_ )
class UpperCamelCase_ (__A ):
__magic_name__ = '''sigopt'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
return is_sigopt_available()
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : str ) -> str:
return run_hp_search_sigopt(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Any ) -> Any:
return default_hp_space_sigopt(lowerCAmelCase_ )
class UpperCamelCase_ (__A ):
__magic_name__ = '''wandb'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> int:
return is_wandb_available()
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
return run_hp_search_wandb(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : List[str] ) -> Any:
return default_hp_space_wandb(lowerCAmelCase_ )
lowerCamelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def snake_case ( ):
UpperCAmelCase_ : List[str] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(A__ ) > 0:
UpperCAmelCase_ : Optional[Any] = available_backends[0].name
if len(A__ ) > 1:
logger.info(
F"""{len(A__ )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
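# Usage sketch: pick whichever backend is installed, then dispatch a search through it
# (hedged -- the trainer-side call pattern is an assumption, not shown in this file):
if __name__ == "__main__":
    backend_name = default_hp_search_backend()  # raises RuntimeError if nothing is installed
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
    backend.ensure_available()
    print(f"Would run hyperparameter search with the {backend.name} backend.")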
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase__ = '''\
Text data.
Second line of data.'''
lowerCAmelCase__ = '''file'''
@pytest.fixture(scope='''session''' )
def snake_case_ ( A_ : Any ):
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_lowerCamelCase : List[Any] = bytes(A_, '''utf-8''' )
with zstd.open(A_, '''wb''' ) as f:
f.write(A_ )
return path
@pytest.fixture
def snake_case_ ( A_ : Dict ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir, A_ ), '''w''' ) as f:
f.write(A_ )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''', ['''gzip''', '''xz''', '''zstd'''] )
def snake_case_ ( A_ : List[str], A_ : str, A_ : Optional[int], A_ : Dict, A_ : Optional[Any], A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : str = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_lowerCamelCase : int = input_paths[compression_format]
_lowerCamelCase : Any = tmp_path / '''cache'''
_lowerCamelCase : str = DownloadConfig(cache_dir=A_, extract_compressed_file=A_ )
_lowerCamelCase : Dict = cached_path(A_, download_config=A_ )
with open(A_ ) as f:
_lowerCamelCase : Optional[int] = f.read()
with open(A_ ) as f:
_lowerCamelCase : List[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''', [True, False] )
@pytest.mark.parametrize('''default_cache_dir''', [True, False] )
def snake_case_ ( A_ : Dict, A_ : List[Any], A_ : Any, A_ : Optional[Any], A_ : str ):
'''simple docstring'''
_lowerCamelCase : Any = '''custom_cache'''
_lowerCamelCase : List[str] = '''custom_extracted_dir'''
_lowerCamelCase : Union[str, Any] = tmp_path / '''custom_extracted_path'''
if default_extracted:
_lowerCamelCase : List[Any] = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''', A_ )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(A_ ) )
_lowerCamelCase : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : int = (
DownloadConfig(extract_compressed_file=A_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=A_ )
)
_lowerCamelCase : Any = cached_path(A_, download_config=A_ )
assert Path(A_ ).parent.parts[-2:] == expected
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = str(Path(A_ ).resolve() )
assert cached_path(A_ ) == text_file
# relative path
_lowerCamelCase : Any = str(Path(A_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(A_ ) == text_file
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : int = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(A_ ):
cached_path(A_ )
# relative path
_lowerCamelCase : Optional[int] = '''./__missing_file__.txt'''
with pytest.raises(A_ ):
cached_path(A_ )
def snake_case_ ( A_ : Tuple ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(A_ ) as f:
_lowerCamelCase : Dict = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''', A_ )
def snake_case_ ( ):
'''simple docstring'''
with pytest.raises(A_ ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', A_ )
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(A_ ):
http_get('''https://huggingface.co''', temp_file=A_ )
with pytest.raises(A_ ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', A_ )
def snake_case_ ( A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(A_ ):
ftp_get('''ftp://huggingface.co''', temp_file=A_ )
with pytest.raises(A_ ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', A_ )
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(A_ ):
fsspec_get('''s3://huggingface.co''', temp_file=A_ )
with pytest.raises(A_ ):
fsspec_head('''s3://huggingface.co''' )
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __snake_case ( _lowercase):
snake_case__ : Dict = "blenderbot-small"
snake_case__ : Optional[int] = ["past_key_values"]
snake_case__ : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __lowerCAmelCase : Dict=5_0_2_6_5 , __lowerCAmelCase : str=5_1_2 , __lowerCAmelCase : Any=8 , __lowerCAmelCase : Any=2_0_4_8 , __lowerCAmelCase : List[str]=1_6 , __lowerCAmelCase : Dict=8 , __lowerCAmelCase : int=2_0_4_8 , __lowerCAmelCase : List[str]=1_6 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Union[str, Any]=5_1_2 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Union[str, Any]=2 , **__lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : Tuple = d_model
_lowerCamelCase : Optional[Any] = encoder_ffn_dim
_lowerCamelCase : Optional[int] = encoder_layers
_lowerCamelCase : Any = encoder_attention_heads
_lowerCamelCase : int = decoder_ffn_dim
_lowerCamelCase : str = decoder_layers
_lowerCamelCase : str = decoder_attention_heads
_lowerCamelCase : int = dropout
_lowerCamelCase : Optional[Any] = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : str = activation_function
_lowerCamelCase : Dict = init_std
_lowerCamelCase : Optional[int] = encoder_layerdrop
_lowerCamelCase : List[str] = decoder_layerdrop
_lowerCamelCase : Optional[int] = use_cache
_lowerCamelCase : Dict = encoder_layers
_lowerCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , forced_eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
class __snake_case ( _lowercase):
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_lowerCamelCase : List[Any] = {0: '''batch'''}
_lowerCamelCase : int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_lowerCamelCase : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
_lowerCamelCase : Any = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__lowerCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCamelCase : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_lowerCamelCase , _lowerCamelCase : str = self.num_layers
for i in range(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
_lowerCamelCase : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_lowerCamelCase : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : List[str] = super().outputs
else:
_lowerCamelCase : int = super(__lowerCAmelCase , self ).outputs
if self.use_past:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.num_layers
for i in range(__lowerCAmelCase ):
_lowerCamelCase : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
_lowerCamelCase : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ):
"""simple docstring"""
_lowerCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Generate decoder inputs
_lowerCamelCase : Dict = seq_length if not self.use_past else 1
_lowerCamelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowerCamelCase : str = dict(**__lowerCAmelCase , **__lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_lowerCamelCase , _lowerCamelCase : Optional[Any] = common_inputs['''input_ids'''].shape
_lowerCamelCase : str = common_inputs['''decoder_input_ids'''].shape[1]
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.num_attention_heads
_lowerCamelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCamelCase : Any = decoder_seq_length + 3
_lowerCamelCase : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCamelCase : int = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__lowerCAmelCase , __lowerCAmelCase )] , dim=1 )
_lowerCamelCase : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCamelCase , _lowerCamelCase : int = self.num_layers
_lowerCamelCase : Union[str, Any] = min(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Tuple = max(__lowerCAmelCase , __lowerCAmelCase ) - min_num_layers
_lowerCamelCase : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
) )
# TODO: test this.
_lowerCamelCase : Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__lowerCAmelCase , __lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ):
"""simple docstring"""
_lowerCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_lowerCamelCase , _lowerCamelCase : List[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_lowerCamelCase : Dict = seqlen + 2
_lowerCamelCase , _lowerCamelCase : Tuple = self.num_layers
_lowerCamelCase , _lowerCamelCase : List[Any] = self.num_attention_heads
_lowerCamelCase : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCamelCase : Tuple = common_inputs['''attention_mask'''].dtype
_lowerCamelCase : Union[str, Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 )
_lowerCamelCase : List[Any] = [
(torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(__lowerCAmelCase )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCamelCase : Optional[int] = tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCamelCase : Union[str, Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCamelCase : Optional[Any] = dict(tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
elif self.task == "causal-lm":
_lowerCamelCase : Dict = self._generate_dummy_inputs_for_causal_lm(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
else:
_lowerCamelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : Any = super()._flatten_past_key_values_(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
_lowerCamelCase : Dict = super(__lowerCAmelCase , self )._flatten_past_key_values_(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
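# Small sanity sketch (hedged: checkpoint-free; it only exercises the attribute_map above):
if __name__ == "__main__":
    config = BlenderbotSmallConfig()
    # `hidden_size` and `num_attention_heads` are aliases resolved through `attribute_map`
    assert config.hidden_size == config.d_model == 512
    assert config.num_attention_heads == config.encoder_attention_heads == 16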
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from a non-empty sequence and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the elements of the linked list in reverse order (recursively)."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
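# Note: `print_reverse` recurses once per node, so a very long list can exceed Python's
# default recursion limit (~1000 frames). A minimal iterative sketch with an explicit
# stack (an addition for illustration, not part of the original exercise):
def print_reverse_iterative(head_node: Node) -> None:
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())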
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns it contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
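# Usage sketch (hedged: the agent name is arbitrary; a plain prompt string containing
# whitespace short-circuits the function and is returned unchanged, with no download):
if __name__ == "__main__":
    template = download_prompt("Answer the question: <<task>>", agent_name="demo-agent")
    assert template == "Answer the question: <<task>>"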
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through the unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first of the first four consecutive integers to have four distinct
    prime factors each (Project Euler 47)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
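# Quick sanity checks (hedged: small hand-verifiable cases added for illustration):
# n=2 -> 14, since 14 = 2*7 and 15 = 3*5 are the first two consecutive integers with
# two distinct prime factors each.
if __name__ == "__main__":
    assert unique_prime_factors(100) == {2, 5}
    assert solution(2) == 14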
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
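# The TextIteratorStreamer pattern tested above, as a standalone sketch (hedged: requires
# torch; the checkpoint is the same tiny test model, but any causal LM works):
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer)
    # generation runs in a background thread; the main thread consumes text as it arrives
    Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
    for chunk in streamer:
        print(chunk, end="", flush=True)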
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an olid such as 'isbn/0140328726', return book data from Open Library."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
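# Minimal sketch (hedged: checkpoint-free instantiation; the defaults mirror the
# microsoft/conditional-detr-resnet-50 architecture):
if __name__ == "__main__":
    config = ConditionalDetrConfig()
    print(config.hidden_size, config.num_queries)  # 256 300
    onnx_config = ConditionalDetrOnnxConfig(config)
    print(list(onnx_config.inputs))  # ['pixel_values', 'pixel_mask']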
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    # Disable gradient tracking for every parameter of the module
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
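# Usage sketch (hedged: `freeze_params`/`get_device`/`get_timestamp` are reconstructions
# of the obfuscated original names; the tiny module below is only for illustration):
if __name__ == "__main__":
    device = get_device()
    layer = torch.nn.Linear(4, 4).to(device)
    freeze_params(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(f"[{get_timestamp()}] frozen linear layer on {device}")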
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: output 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
UpperCamelCase : int = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "tapas"
def __init__( self , __UpperCAmelCase=3_0522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1024 , __UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase=1_0.0 , __UpperCAmelCase=0 , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase=None , __UpperCAmelCase=1.0 , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase="ratio" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_sizes
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
# Fine-tuning task hyperparameters
__UpperCamelCase = positive_label_weight
__UpperCamelCase = num_aggregation_labels
__UpperCamelCase = aggregation_loss_weight
__UpperCamelCase = use_answer_as_supervision
__UpperCamelCase = answer_loss_importance
__UpperCamelCase = use_normalized_answer_loss
__UpperCamelCase = huber_loss_delta
__UpperCamelCase = temperature
__UpperCamelCase = aggregation_temperature
__UpperCamelCase = use_gumbel_for_cells
__UpperCamelCase = use_gumbel_for_aggregation
__UpperCamelCase = average_approximation_function
__UpperCamelCase = cell_selection_preference
__UpperCamelCase = answer_loss_cutoff
__UpperCamelCase = max_num_rows
__UpperCamelCase = max_num_columns
__UpperCamelCase = average_logits_per_cell
__UpperCamelCase = select_one_column
__UpperCamelCase = allow_empty_column_selection
__UpperCamelCase = init_cell_selection_weights_to_zero
__UpperCamelCase = reset_position_index_per_cell
__UpperCamelCase = disable_per_token_loss
# Aggregation hyperparameters
__UpperCamelCase = aggregation_labels
__UpperCamelCase = no_aggregation_label_index
if isinstance(self.aggregation_labels , __UpperCAmelCase ):
__UpperCamelCase = {int(__UpperCAmelCase ): v for k, v in aggregation_labels.items()}
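# Sketch of a WTQ-style fine-tuning config (hedged: the aggregation label mapping follows
# the published TAPAS WTQ setup, but the numeric thresholds below are illustrative):
if __name__ == "__main__":
    config = TapasConfig(
        num_aggregation_labels=4,
        aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
        use_answer_as_supervision=True,
        answer_loss_cutoff=0.664694,
        cell_selection_preference=0.207951,
    )
    assert config.aggregation_labels[1] == "SUM"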
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A ( ) -> List[Any]:
__UpperCamelCase = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=snake_case , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=snake_case , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=snake_case )
return parser.parse_args()
def A ( ) -> Optional[int]:
__UpperCamelCase = parse_args()
# Import training_script as a module.
__UpperCamelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__UpperCamelCase = script_fpath.stem
__UpperCamelCase = importlib.import_module(snake_case )
# Patch sys.argv
__UpperCamelCase = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
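# Typical invocation (hedged: the training script and its flags past this launcher are
# examples only):
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --per_device_batch_size 8
#
# The launched script must expose an `_mp_fn(index)` entry point for xmp.spawn to call.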
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Any=False ) -> int:
UpperCAmelCase : Any = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if """opt""" in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase : Dict = tokenizer('''\n''' , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
UpperCAmelCase : Optional[Any] = get_blipa_config(lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
UpperCAmelCase : Tuple = BlipaForConditionalGeneration(lowerCAmelCase__ ).eval()
UpperCAmelCase : Optional[Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
UpperCAmelCase : List[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase : Any = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCAmelCase : int = load_model_and_preprocess(
name=lowerCAmelCase__ , model_type=lowerCAmelCase__ , is_eval=lowerCAmelCase__ , device=lowerCAmelCase__ )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase : List[Any] = original_model.state_dict()
UpperCAmelCase : Union[str, Any] = create_rename_keys(lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase : List[str] = state_dict.pop(lowerCAmelCase__ )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase : Optional[Any] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase : int = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase : Optional[Any] = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase : Optional[int] = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase : List[Any] = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase : str = key.replace('''t5''' , '''language''' )
UpperCAmelCase : int = val
# read in qv biases
read_in_q_v_bias(lowerCAmelCase__ , lowerCAmelCase__ )
    missing_keys , unexpected_keys = hf_model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
    assert len(missing_keys ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase : str = load_demo_image()
UpperCAmelCase : str = vis_processors["""eval"""](lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
UpperCAmelCase : Tuple = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(lowerCAmelCase__ )
# create processor
UpperCAmelCase : List[str] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=lowerCAmelCase__ , image_std=lowerCAmelCase__ )
UpperCAmelCase : Dict = BlipaProcessor(image_processor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
UpperCAmelCase : Union[str, Any] = processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values.to(lowerCAmelCase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
hf_model.to(lowerCAmelCase__ )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase : Tuple = hf_model(lowerCAmelCase__ , lowerCAmelCase__ ).logits
else:
UpperCAmelCase : List[str] = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCAmelCase : List[Any] = hf_model(lowerCAmelCase__ , lowerCAmelCase__ , labels=lowerCAmelCase__ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=lowerCAmelCase__ )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase : int = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=lowerCAmelCase__ )
else:
# cast to same type
UpperCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(lowerCAmelCase__ ) , lowerCAmelCase__ , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase : Tuple = """"""
UpperCAmelCase : int = tokenizer(lowerCAmelCase__ , return_tensors='''pt''' ).input_ids.to(lowerCAmelCase__ )
UpperCAmelCase : Tuple = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase : Tuple = hf_model.generate(
lowerCAmelCase__ , lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , lowerCAmelCase__ )
UpperCAmelCase : str = input_ids.shape[1]
UpperCAmelCase : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCAmelCase__ )
UpperCAmelCase : Dict = [text.strip() for text in output_text]
print('''HF generation:''' , lowerCAmelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCAmelCase__ )
hf_model.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase__: Union[str, Any] = argparse.ArgumentParser()
UpperCamelCase__: int = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
UpperCamelCase__: int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
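# Invocation sketch (the script filename and output path are illustrative; the
# flags match the parser defined above):
#
#   python convert_blip_2_checkpoint.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./converted/blip2-opt-2.7b \
#       --push_to_hub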
| 127 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
class _a ( _lowerCAmelCase ):
A = CLIPConfig
A = ['''CLIPEncoderLayer''']
def __init__(self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
super().__init__(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = CLIPVisionModelWithProjection(config.vision_config )
UpperCAmelCase_: Tuple = nn.Linear(config.vision_config.projection_dim, 1 )
UpperCAmelCase_: Tuple = nn.Linear(config.vision_config.projection_dim, 1 )
@torch.no_grad()
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0.5, SCREAMING_SNAKE_CASE_=0.5 ) -> Tuple:
UpperCAmelCase_: Optional[Any] = self.vision_model(SCREAMING_SNAKE_CASE_ )[0]
UpperCAmelCase_: List[str] = self.p_head(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = nsfw_detected.flatten()
UpperCAmelCase_: List[Any] = nsfw_detected > p_threshold
UpperCAmelCase_: Any = nsfw_detected.tolist()
if any(SCREAMING_SNAKE_CASE_ ):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, nsfw_detected_ in enumerate(SCREAMING_SNAKE_CASE_ ):
if nsfw_detected_:
UpperCAmelCase_: Tuple = np.zeros(images[idx].shape )
UpperCAmelCase_: str = self.w_head(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = watermark_detected.flatten()
UpperCAmelCase_: Tuple = watermark_detected > w_threshold
UpperCAmelCase_: int = watermark_detected.tolist()
if any(SCREAMING_SNAKE_CASE_ ):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, watermark_detected_ in enumerate(SCREAMING_SNAKE_CASE_ ):
if watermark_detected_:
UpperCAmelCase_: Any = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
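# Toy illustration of the flatten -> threshold -> tolist pattern shared by the
# p_head and w_head branches above (standalone sketch, not part of the checker):
#
#   scores = torch.tensor([[0.2], [0.7]])        # per-image head outputs
#   flagged = (scores.flatten() > 0.5).tolist()  # -> [False, True]
#
# Any image whose flag is True is then replaced with np.zeros(images[idx].shape).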
| 556 | 0 |
class _lowerCamelCase :
"""simple docstring"""
def __init__( self : int , snake_case : Any , snake_case : Any , snake_case : Optional[Any] ):
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = graph
self._normalize_graph(snake_case , snake_case )
__UpperCamelCase = len(snake_case )
__UpperCamelCase = None
def snake_case ( self : str , snake_case : Optional[int] , snake_case : int ):
        if isinstance(sources , int ):
            __UpperCamelCase = [sources]
        if isinstance(sinks , int ):
            __UpperCamelCase = [sinks]
if len(snake_case ) == 0 or len(snake_case ) == 0:
return
__UpperCamelCase = sources[0]
__UpperCamelCase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(snake_case ) > 1 or len(snake_case ) > 1:
__UpperCamelCase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__UpperCamelCase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__UpperCamelCase = max_input_flow
__UpperCamelCase = 0
__UpperCamelCase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__UpperCamelCase = max_input_flow
__UpperCamelCase = size - 1
def snake_case ( self : Dict ):
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def snake_case ( self : List[Any] , snake_case : List[str] ):
__UpperCamelCase = algorithm(self )
class _lowerCamelCase :
"""simple docstring"""
def __init__( self : str , snake_case : Any ):
__UpperCamelCase = flow_network
__UpperCamelCase = flow_network.verticesCount
__UpperCamelCase = flow_network.sourceIndex
__UpperCamelCase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__UpperCamelCase = flow_network.graph
__UpperCamelCase = False
def snake_case ( self : List[str] ):
if not self.executed:
self._algorithm()
__UpperCamelCase = True
def snake_case ( self : Optional[int] ):
pass
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Tuple , snake_case : Optional[int] ):
super().__init__(snake_case )
# use this to save your result
__UpperCamelCase = -1
def snake_case ( self : Tuple ):
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Optional[int] , snake_case : Tuple ):
super().__init__(snake_case )
__UpperCamelCase = [[0] * self.verticies_count for i in range(self.verticies_count )]
__UpperCamelCase = [0] * self.verticies_count
__UpperCamelCase = [0] * self.verticies_count
def snake_case ( self : Union[str, Any] ):
__UpperCamelCase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__UpperCamelCase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__UpperCamelCase = 0
while i < len(snake_case ):
__UpperCamelCase = vertices_list[i]
__UpperCamelCase = self.heights[vertex_index]
self.process_vertex(snake_case )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(snake_case ) )
__UpperCamelCase = 0
else:
i += 1
__UpperCamelCase = sum(self.preflow[self.source_index] )
def snake_case ( self : Union[str, Any] , snake_case : Any ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(snake_case , snake_case )
self.relabel(snake_case )
def snake_case ( self : Union[str, Any] , snake_case : List[str] , snake_case : int ):
__UpperCamelCase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def snake_case ( self : Optional[Any] , snake_case : Dict ):
__UpperCamelCase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__UpperCamelCase = self.heights[to_index]
if min_height is not None:
__UpperCamelCase = min_height + 1
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 711 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : int = "mctct"
def __init__( self : List[Any] , snake_case : Optional[Any]=8065 , snake_case : Optional[int]=1536 , snake_case : Any=36 , snake_case : List[str]=6144 , snake_case : Dict=4 , snake_case : str=384 , snake_case : List[str]=920 , snake_case : Dict=1E-5 , snake_case : Union[str, Any]=0.3 , snake_case : Optional[Any]="relu" , snake_case : str=0.02 , snake_case : Optional[int]=0.3 , snake_case : int=0.3 , snake_case : Any=1 , snake_case : int=0 , snake_case : Union[str, Any]=2 , snake_case : List[Any]=1 , snake_case : Dict=0.3 , snake_case : int=1 , snake_case : Optional[int]=(7,) , snake_case : List[Any]=(3,) , snake_case : Optional[int]=80 , snake_case : List[str]=1 , snake_case : int=None , snake_case : List[str]="sum" , snake_case : Tuple=False , **snake_case : List[str] , ):
super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = num_attention_heads
__UpperCamelCase = attention_head_dim
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = layerdrop
__UpperCamelCase = hidden_act
__UpperCamelCase = initializer_range
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
__UpperCamelCase = eos_token_id
__UpperCamelCase = conv_glu_dim
__UpperCamelCase = conv_dropout
__UpperCamelCase = num_conv_layers
__UpperCamelCase = input_feat_per_channel
__UpperCamelCase = input_channels
__UpperCamelCase = conv_channels
__UpperCamelCase = ctc_loss_reduction
__UpperCamelCase = ctc_zero_infinity
        # prevents config testing from failing when exporting to json
__UpperCamelCase = list(snake_case )
__UpperCamelCase = list(snake_case )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
| 375 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : str = tempfile.mkdtemp()
# fmt: off
UpperCamelCase : List[Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
UpperCamelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
UpperCamelCase : Tuple = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
UpperCamelCase : str = os.path.join(self.tmpdirname, SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file, 'w', encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, **SCREAMING_SNAKE_CASE_ ) -> Any:
return ViTImageProcessor.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
UpperCamelCase : Dict = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_, 0, -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : int = self.get_tokenizer()
UpperCamelCase : List[Any] = self.get_image_processor()
UpperCamelCase : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_, image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase : List[Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase : Dict = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' )
UpperCamelCase : str = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_, padding_value=1.0 )
UpperCamelCase : Any = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=SCREAMING_SNAKE_CASE_, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : Dict = self.get_image_processor()
UpperCamelCase : int = self.get_tokenizer()
UpperCamelCase : List[Any] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_, image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.prepare_image_inputs()
UpperCamelCase : Any = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : str = processor(images=SCREAMING_SNAKE_CASE_, return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[Any] = self.get_image_processor()
UpperCamelCase : str = self.get_tokenizer()
UpperCamelCase : List[Any] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_, image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = 'lower newer'
UpperCamelCase : Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def snake_case_ ( self ) -> str:
UpperCamelCase : Dict = self.get_image_processor()
UpperCamelCase : Union[str, Any] = self.get_tokenizer()
UpperCamelCase : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_, image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = 'lower newer'
UpperCamelCase : Tuple = self.prepare_image_inputs()
UpperCamelCase : List[str] = processor(text=SCREAMING_SNAKE_CASE_, images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
processor()
def snake_case_ ( self ) -> int:
UpperCamelCase : str = self.get_image_processor()
UpperCamelCase : Optional[int] = self.get_tokenizer()
UpperCamelCase : Dict = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_, image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase : List[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Tuple = self.get_image_processor()
UpperCamelCase : Optional[Any] = self.get_tokenizer()
UpperCamelCase : List[str] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_, image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = 'lower newer'
UpperCamelCase : str = self.prepare_image_inputs()
UpperCamelCase : List[str] = processor(text=SCREAMING_SNAKE_CASE_, images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 40 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
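# With the lazy wiring above, importing the package stays cheap: attributes such
# as MraModel resolve through _LazyModule.__getattr__ on first access, so torch
# is only imported once a model class is actually used. Usage sketch (assuming
# torch is installed):
#
#   from transformers.models.mra import MraConfig  # triggers the lazy lookup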
| 40 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class snake_case_ (unittest.TestCase ):
"""simple docstring"""
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_activation("swish")
self.assertIsInstance(lowercase ,nn.SiLU)
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa)).item() ,0)
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa)).item() ,0)
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa)).item() ,0)
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa)).item() ,20)
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = get_activation("silu")
self.assertIsInstance(lowercase ,nn.SiLU)
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa)).item() ,0)
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa)).item() ,0)
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa)).item() ,0)
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa)).item() ,20)
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Tuple = get_activation("mish")
self.assertIsInstance(lowercase ,nn.Mish)
self.assertEqual(act(torch.tensor(-200 ,dtype=torch.floataa)).item() ,0)
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa)).item() ,0)
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa)).item() ,0)
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa)).item() ,20)
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_activation("gelu")
self.assertIsInstance(lowercase ,nn.GELU)
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa)).item() ,0)
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa)).item() ,0)
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa)).item() ,0)
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa)).item() ,20)
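# Standalone sketch of the mapping exercised above (assuming diffusers is
# installed; mirrors the swish/silu/mish/gelu cases):
#
#   from diffusers.models.activations import get_activation
#   assert isinstance(get_activation("swish"), torch.nn.SiLU)
#   assert isinstance(get_activation("gelu"), torch.nn.GELU)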
| 719 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = AltDiffusionPipeline
_lowerCamelCase = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def A_ ( self):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
UpperCAmelCase_ : Any = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=lowercase ,set_alpha_to_one=lowercase ,)
torch.manual_seed(0)
UpperCAmelCase_ : Any = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0)
UpperCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
UpperCAmelCase_ : Dict = CLIPTextModel(lowercase)
UpperCAmelCase_ : List[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
UpperCAmelCase_ : List[str] = 77
UpperCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A_ ( self ,lowercase ,lowercase=0):
"""simple docstring"""
if str(lowercase).startswith("mps"):
UpperCAmelCase_ : Any = torch.manual_seed(lowercase)
else:
UpperCAmelCase_ : Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
UpperCAmelCase_ : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def A_ ( self):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3)
def A_ ( self):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.get_dummy_components()
torch.manual_seed(0)
UpperCAmelCase_ : List[Any] = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase_ : Any = RobertaSeriesModelWithTransformation(lowercase)
UpperCAmelCase_ : Optional[Any] = text_encoder
UpperCAmelCase_ : List[Any] = AltDiffusionPipeline(**lowercase)
UpperCAmelCase_ : Union[str, Any] = alt_pipe.to(lowercase)
alt_pipe.set_progress_bar_config(disable=lowercase)
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(lowercase)
UpperCAmelCase_ : Optional[Any] = "A photo of an astronaut"
UpperCAmelCase_ : str = alt_pipe(**lowercase)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Optional[Any] = np.array(
[0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowercase)
torch.manual_seed(0)
UpperCAmelCase_ : Any = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase_ : Union[str, Any] = RobertaSeriesModelWithTransformation(lowercase)
UpperCAmelCase_ : Dict = text_encoder
UpperCAmelCase_ : Optional[int] = AltDiffusionPipeline(**lowercase)
UpperCAmelCase_ : List[Any] = alt_pipe.to(lowercase)
alt_pipe.set_progress_bar_config(disable=lowercase)
UpperCAmelCase_ : str = self.get_dummy_inputs(lowercase)
UpperCAmelCase_ : Union[str, Any] = alt_pipe(**lowercase)
UpperCAmelCase_ : int = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array(
[0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class snake_case_ (unittest.TestCase ):
"""simple docstring"""
def A_ ( self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : str = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" ,safety_checker=lowercase)
UpperCAmelCase_ : Any = alt_pipe.to(lowercase)
alt_pipe.set_progress_bar_config(disable=lowercase)
UpperCAmelCase_ : Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_ : Any = torch.manual_seed(0)
UpperCAmelCase_ : Optional[int] = alt_pipe([prompt] ,generator=lowercase ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type="np")
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : Dict = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" ,subfolder="scheduler")
UpperCAmelCase_ : Optional[int] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" ,scheduler=lowercase ,safety_checker=lowercase)
UpperCAmelCase_ : List[str] = alt_pipe.to(lowercase)
alt_pipe.set_progress_bar_config(disable=lowercase)
UpperCAmelCase_ : str = "A painting of a squirrel eating a burger"
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0)
UpperCAmelCase_ : List[str] = alt_pipe([prompt] ,generator=lowercase ,num_inference_steps=2 ,output_type="numpy")
UpperCAmelCase_ : int = output.images
UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : List[Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 455 | 0 |
'''simple docstring'''
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Any:
'''simple docstring'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__lowercase ,n - 1 ,__lowercase ) * a) % mod
else:
        _UpperCamelCase : Tuple = binary_exponentiation(__lowercase ,n // 2 ,__lowercase )
return (b * b) % mod
# a prime number
lowerCAmelCase_ : List[str] = 701
lowerCAmelCase_ : Union[str, Any] = 10_0000_0000
lowerCAmelCase_ : Union[str, Any] = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
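# Worked check (hand-computed, reusing the names from the prints above): 701 is
# prime and gcd(10, 701) == 1, so Fermat's little theorem gives
# 10**700 % 701 == 1. Hence binary_exponentiation(b, p - 2, p) is the modular
# inverse of b and (b * binary_exponentiation(b, p - 2, p)) % p == 1.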
| 435 |
'''simple docstring'''
from math import pi, sqrt, tan
def lowercase__ ( __lowercase : float ) -> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowercase__ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowercase__ ( __lowercase : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowercase__ ( __lowercase : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowercase__ ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowercase__ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
__UpperCamelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowercase__ ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowercase__ ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(__lowercase , 2 ) * torus_radius * tube_radius
def lowercase__ ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowercase__ ( __lowercase : float ) -> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowercase__ ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowercase__ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
__UpperCamelCase = (sidea + sidea + sidea) / 2
__UpperCamelCase = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowercase__ ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowercase__ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowercase__ ( __lowercase : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowercase__ ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowercase__ ( __lowercase : float , __lowercase : float ) -> float:
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowercase__ ( __lowercase : int , __lowercase : float ) -> float:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f'Rectangle: {area_rectangle(10, 20) = }')
print(f'Square: {area_square(10) = }')
print(f'Triangle: {area_triangle(10, 10) = }')
print(f'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(f'Parallelogram: {area_parallelogram(10, 20) = }')
print(f'Rhombus: {area_rhombus(10, 20) = }')
print(f'Trapezium: {area_trapezium(10, 20, 30) = }')
print(f'Circle: {area_circle(20) = }')
print(f'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(f'Cube: {surface_area_cube(20) = }')
print(f'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(f'Sphere: {surface_area_sphere(20) = }')
print(f'Hemisphere: {surface_area_hemisphere(20) = }')
print(f'Cone: {surface_area_cone(10, 20) = }')
print(f'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(f'Cylinder: {surface_area_cylinder(10, 20) = }')
print(f'Torus: {surface_area_torus(20, 10) = }')
print(f'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(f'Square: {area_reg_polygon(4, 10) = }')
    print(f'Regular Pentagon: {area_reg_polygon(5, 10) = }')
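# Cross-check (hand-derived): for sides = 3 the regular-polygon formula
# n * s**2 / (4 * tan(pi / n)) reduces to sqrt(3) / 4 * s**2, the usual
# equilateral-triangle area, so area_reg_polygon(3, 10) ~= 25 * sqrt(3) ~= 43.30.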
| 399 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_lowerCamelCase =logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
_lowerCamelCase ={
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_lowerCamelCase ={
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_lowerCamelCase =sorted(arg_to_scheduler.keys())
_lowerCamelCase ="{" + ", ".join(arg_to_scheduler_choices) + "}"
class a_ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Any ,snake_case : argparse.Namespace ,snake_case : Tuple=None ,snake_case : Optional[int]="base" ,snake_case : Dict=None ,snake_case : int=None ,snake_case : List[Any]=None ,**snake_case : List[str] ,):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(lowerCamelCase_ )
SCREAMING_SNAKE_CASE =0
SCREAMING_SNAKE_CASE =Path(self.hparams.output_dir )
SCREAMING_SNAKE_CASE =self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path ,**({'num_labels': num_labels} if num_labels is not None else {}) ,cache_dir=lowerCamelCase_ ,**lowerCamelCase_ ,)
else:
SCREAMING_SNAKE_CASE =config
SCREAMING_SNAKE_CASE =("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams ,lowerCamelCase_ ,lowerCamelCase_ ):
assert hasattr(self.config ,lowerCamelCase_ ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config ,lowerCamelCase_ ,getattr(self.hparams ,lowerCamelCase_ ) )
if tokenizer is None:
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path ,cache_dir=lowerCamelCase_ ,)
else:
SCREAMING_SNAKE_CASE =tokenizer
SCREAMING_SNAKE_CASE =MODEL_MODES[mode]
if model is None:
SCREAMING_SNAKE_CASE =self.model_type.from_pretrained(
self.hparams.model_name_or_path ,from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) ,config=self.config ,cache_dir=lowerCamelCase_ ,)
else:
SCREAMING_SNAKE_CASE =model
def _lowerCAmelCase ( self : Optional[Any] ,*snake_case : Tuple ,**snake_case : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_type.from_pretrained(*lowerCamelCase_ ,**lowerCamelCase_ )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =arg_to_scheduler[self.hparams.lr_scheduler]
SCREAMING_SNAKE_CASE =get_schedule_func(
self.opt ,num_warmup_steps=self.hparams.warmup_steps ,num_training_steps=self.total_steps() )
SCREAMING_SNAKE_CASE ={"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model
SCREAMING_SNAKE_CASE =["""bias""", """LayerNorm.weight"""]
SCREAMING_SNAKE_CASE =[
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
SCREAMING_SNAKE_CASE =Adafactor(
lowerCamelCase_ ,lr=self.hparams.learning_rate ,scale_parameter=lowerCamelCase_ ,relative_step=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE =AdamW(
lowerCamelCase_ ,lr=self.hparams.learning_rate ,eps=self.hparams.adam_epsilon )
SCREAMING_SNAKE_CASE =optimizer
SCREAMING_SNAKE_CASE =self.get_lr_scheduler()
return [optimizer], [scheduler]
def _lowerCAmelCase ( self : Dict ,snake_case : Dict ,snake_case : List[str] ):
return self.validation_step(lowerCamelCase_ ,lowerCamelCase_ )
def _lowerCAmelCase ( self : Any ,snake_case : Optional[int] ):
return self.validation_end(lowerCamelCase_ )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =max(1 ,self.hparams.gpus ) # TODO: consider num_tpu_cores
SCREAMING_SNAKE_CASE =self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def _lowerCAmelCase ( self : str ,snake_case : Optional[int] ):
if stage == "test":
SCREAMING_SNAKE_CASE =len(self.test_dataloader().dataset )
else:
SCREAMING_SNAKE_CASE =self.get_dataloader('train' ,self.hparams.train_batch_size ,shuffle=lowerCamelCase_ )
SCREAMING_SNAKE_CASE =len(self.train_dataloader().dataset )
def _lowerCAmelCase ( self : Any ,snake_case : str ,snake_case : int ,snake_case : bool = False ):
raise NotImplementedError('You must implement this for your task' )
def _lowerCAmelCase ( self : int ):
return self.train_loader
def _lowerCAmelCase ( self : Any ):
return self.get_dataloader('dev' ,self.hparams.eval_batch_size ,shuffle=lowerCamelCase_ )
def _lowerCAmelCase ( self : List[Any] ):
return self.get_dataloader('test' ,self.hparams.eval_batch_size ,shuffle=lowerCamelCase_ )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Any ):
return os.path.join(
self.hparams.data_dir ,'cached_{}_{}_{}'.format(
lowerCamelCase_ ,list(filter(lowerCamelCase_ ,self.hparams.model_name_or_path.split('/' ) ) ).pop() ,str(self.hparams.max_seq_length ) ,) ,)
@pl.utilities.rank_zero_only
def _lowerCAmelCase ( self : Dict ,snake_case : Dict[str, Any] ):
SCREAMING_SNAKE_CASE =self.output_dir.joinpath('best_tfmr' )
SCREAMING_SNAKE_CASE =self.step_count
self.model.save_pretrained(lowerCamelCase_ )
self.tokenizer.save_pretrained(lowerCamelCase_ )
@staticmethod
def _lowerCAmelCase ( snake_case : List[Any] ,snake_case : Optional[Any] ):
parser.add_argument(
'--model_name_or_path' ,default=lowerCamelCase_ ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help='Path to pretrained model or model identifier from huggingface.co/models' ,)
parser.add_argument(
'--config_name' ,default='' ,type=lowerCamelCase_ ,help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' ,default=lowerCamelCase_ ,type=lowerCamelCase_ ,help='Pretrained tokenizer name or path if not the same as model_name' ,)
parser.add_argument(
'--cache_dir' ,default=str(Path(lowerCamelCase_ ).parent / 'test_run' / 'cache' ) ,type=lowerCamelCase_ ,help='Where do you want to store the pre-trained models downloaded from huggingface.co' ,)
parser.add_argument(
'--encoder_layerdrop' ,type=lowerCamelCase_ ,help='Encoder layer dropout probability (Optional). Goes into model.config' ,)
parser.add_argument(
'--decoder_layerdrop' ,type=lowerCamelCase_ ,help='Decoder layer dropout probability (Optional). Goes into model.config' ,)
parser.add_argument(
'--dropout' ,type=lowerCamelCase_ ,help='Dropout probability (Optional). Goes into model.config' ,)
parser.add_argument(
'--attention_dropout' ,type=lowerCamelCase_ ,help='Attention dropout probability (Optional). Goes into model.config' ,)
parser.add_argument('--learning_rate' ,default=5e-5 ,type=lowerCamelCase_ ,help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' ,default='linear' ,choices=lowerCamelCase_ ,metavar=lowerCamelCase_ ,type=lowerCamelCase_ ,help='Learning rate scheduler' ,)
parser.add_argument('--weight_decay' ,default=0.0 ,type=lowerCamelCase_ ,help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' ,default=1e-8 ,type=lowerCamelCase_ ,help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' ,default=0 ,type=lowerCamelCase_ ,help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' ,default=4 ,type=lowerCamelCase_ ,help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' ,dest='max_epochs' ,default=3 ,type=lowerCamelCase_ )
parser.add_argument('--train_batch_size' ,default=32 ,type=lowerCamelCase_ )
parser.add_argument('--eval_batch_size' ,default=32 ,type=lowerCamelCase_ )
parser.add_argument('--adafactor' ,action='store_true' )
class a_ ( pl.Callback ):
"""simple docstring"""
def _lowerCAmelCase ( self : Any ,snake_case : Any ,snake_case : int ):
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class a_ ( pl.Callback ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : List[str] ):
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(lowerCamelCase_ )
class a_ ( pl.Callback ):
"""simple docstring"""
def _lowerCAmelCase ( self : Any ,snake_case : Union[str, Any] ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =trainer.lr_schedulers[0]["""scheduler"""]
SCREAMING_SNAKE_CASE ={f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(lowerCamelCase_ )
def _lowerCAmelCase ( self : List[str] ,snake_case : pl.Trainer ,snake_case : pl.LightningModule ):
rank_zero_info('***** Validation results *****' )
SCREAMING_SNAKE_CASE =trainer.callback_metrics
# Log results
for key in sorted(lowerCamelCase_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(lowerCamelCase_ ,str(metrics[key] ) ) )
def _lowerCAmelCase ( self : Tuple ,snake_case : pl.Trainer ,snake_case : pl.LightningModule ):
rank_zero_info('***** Test results *****' )
SCREAMING_SNAKE_CASE =trainer.callback_metrics
# Log and save results to file
SCREAMING_SNAKE_CASE =os.path.join(pl_module.hparams.output_dir ,'test_results.txt' )
with open(lowerCamelCase_ ,'w' ) as writer:
for key in sorted(lowerCamelCase_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(lowerCamelCase_ ,str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(lowerCamelCase_ ,str(metrics[key] ) ) )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
parser.add_argument(
'--output_dir', default=str(Path(lowerCamelCase_ ).parent / 'test_run' / 'model_checkpoints' ), type=lowerCamelCase_, help='The output directory where the model predictions and checkpoints will be written.', )
parser.add_argument(
'--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
parser.add_argument(
'--fp16_opt_level', type=lowerCamelCase_, default='O2', help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
), )
parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=lowerCamelCase_ )
parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=lowerCamelCase_, help='Max gradient norm' )
parser.add_argument('--do_train', action='store_true', help='Whether to run training.' )
parser.add_argument('--do_predict', action='store_true', help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps', dest='accumulate_grad_batches', type=lowerCamelCase_, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
parser.add_argument('--seed', type=lowerCamelCase_, default=42, help='random seed for initialization' )
parser.add_argument(
'--data_dir', default=str(Path(lowerCamelCase_ ).parent / 'test_run' / 'dummy-train-data' ), type=lowerCamelCase_, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.', )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=None, lowerCAmelCase_=True, lowerCAmelCase_=[], lowerCAmelCase_=None, lowerCAmelCase_=None, **lowerCAmelCase_, ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
SCREAMING_SNAKE_CASE =Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCamelCase_ )
# add custom checkpoints
if checkpoint_callback is None:
SCREAMING_SNAKE_CASE =pl.callbacks.ModelCheckpoint(
filepath=args.output_dir, prefix='checkpoint', monitor='val_loss', mode='min', save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCamelCase_ )
if logging_callback is None:
SCREAMING_SNAKE_CASE =LoggingCallback()
SCREAMING_SNAKE_CASE ={}
if args.fpaa:
SCREAMING_SNAKE_CASE =16
if args.gpus > 1:
SCREAMING_SNAKE_CASE ="""auto"""
SCREAMING_SNAKE_CASE ="""ddp"""
SCREAMING_SNAKE_CASE =args.accumulate_grad_batches
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE ="""auto"""
SCREAMING_SNAKE_CASE =pl.Trainer.from_argparse_args(
lowerCamelCase_, weights_summary=lowerCamelCase_, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=lowerCamelCase_, val_check_interval=1, num_sanity_val_steps=2, **lowerCamelCase_, )
if args.do_train:
trainer.fit(lowerCamelCase_ )
else:
        print('RAG modeling tests with new set functions successfully executed!' )
return trainer
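# Wiring sketch (names follow the upstream module: the pl.LightningModule
# subclass above is BaseTransformer, and the two module-level helpers, both
# mangled to snake_case__, are add_generic_args and generic_train; the task
# module is hypothetical):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   BaseTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskTransformer(args)       # subclass implementing get_dataloader()
#   trainer = generic_train(model, args)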
| 713 |
from ...processing_utils import ProcessorMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = ['image_processor', 'feature_extractor']
__UpperCAmelCase = 'TvltImageProcessor'
__UpperCAmelCase = 'TvltFeatureExtractor'
def __init__( self : Optional[int] ,snake_case : List[str] ,snake_case : Dict ):
super().__init__(image_processor=snake_case ,feature_extractor=snake_case )
SCREAMING_SNAKE_CASE =image_processor
SCREAMING_SNAKE_CASE =feature_extractor
def __call__( self : Dict ,snake_case : Union[str, Any]=None ,snake_case : Optional[int]=None ,snake_case : List[Any]=None ,snake_case : int=None ,snake_case : List[Any]=False ,snake_case : Optional[int]=False ,*snake_case : int ,**snake_case : str ,):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
SCREAMING_SNAKE_CASE =None
if images is not None:
SCREAMING_SNAKE_CASE =self.image_processor(snake_case ,mask_pixel=snake_case ,*snake_case ,**snake_case )
if images_mixed is not None:
SCREAMING_SNAKE_CASE =self.image_processor(snake_case ,is_mixed=snake_case ,*snake_case ,**snake_case )
if audio is not None:
SCREAMING_SNAKE_CASE =self.feature_extractor(
snake_case ,*snake_case ,sampling_rate=snake_case ,mask_audio=snake_case ,**snake_case )
SCREAMING_SNAKE_CASE ={}
if audio is not None:
output_dict.update(snake_case )
if images is not None:
output_dict.update(snake_case )
if images_mixed_dict is not None:
output_dict.update(snake_case )
return output_dict
@property
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.image_processor.model_input_names
SCREAMING_SNAKE_CASE =self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
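# Usage sketch (upstream class name TvltProcessor; inputs are hypothetical, and
# as implemented above the call merges whichever of the image and audio feature
# dicts were produced):
#
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44_100)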
| 252 | 0 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")

    plt.legend()
    plt.show()
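    # A quick cross-check (a sketch, assuming scikit-learn is installed): the
    # gradient-descent weights should point in roughly the same direction as
    # sklearn's fitted coefficients on the same two iris features.
    from sklearn.linear_model import LogisticRegression

    clf = LogisticRegression(C=1e5).fit(x, y)  # large C ~ no regularization, to match the loop above
    print("sklearn coef_:", clf.coef_, "vs gradient-descent theta:", theta)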
| 49 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowerCAmelCase :
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3_0 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=3_2 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_0 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , lowerCAmelCase__=2 , ):
_UpperCAmelCase : Any = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : int = image_size
_UpperCAmelCase : List[str] = patch_size
_UpperCAmelCase : List[Any] = num_channels
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Any = use_labels
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : List[Any] = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : str = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Optional[int] = scope
_UpperCAmelCase : Any = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase : List[Any] = (image_size // patch_size) ** 2
_UpperCAmelCase : Dict = num_patches + 2
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def snake_case_ (self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Dict = TFDeiTModel(config=lowerCAmelCase__ )
_UpperCAmelCase : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Tuple = TFDeiTForMaskedImageModeling(config=lowerCAmelCase__ )
_UpperCAmelCase : int = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : Optional[int] = TFDeiTForMaskedImageModeling(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Dict = self.type_sequence_label_size
_UpperCAmelCase : Optional[int] = TFDeiTForImageClassification(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase : int = 1
_UpperCAmelCase : Optional[int] = TFDeiTForImageClassification(lowerCAmelCase__ )
_UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case_ (self ):
_UpperCAmelCase : int = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __a , __a , unittest.TestCase ):
snake_case : List[str] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
snake_case : Any = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
snake_case : List[str] = False
snake_case : Any = False
snake_case : Optional[Any] = False
snake_case : Any = False
def snake_case_ (self ):
_UpperCAmelCase : Any = TFDeiTModelTester(self )
_UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 )
def snake_case_ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def snake_case_ (self ):
pass
def snake_case_ (self ):
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Dense ) )
def snake_case_ (self ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(lowerCAmelCase__ )
_UpperCAmelCase : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
_UpperCAmelCase : Optional[Any] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def snake_case_ (self ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Union[str, Any] = TFDeiTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __A ( ):
_UpperCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case_ (self ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""tf""" )
# forward pass
_UpperCAmelCase : Optional[int] = model(**lowerCAmelCase__ )
# verify the logits
_UpperCAmelCase : Dict = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
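# The integration test above is gated by @slow; the usual way to opt in (an
# assumption based on the transformers test suite's standard RUN_SLOW convention) is:
#
#   RUN_SLOW=1 python -m pytest tests/models/deit/test_modeling_tf_deit.py -k "IntegrationTest"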
| 414 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')


def serve_command_factory(args: Namespace):
    """Factory used to instantiate the serving server from command line arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints.")
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately.")
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ),
                ], timeout=600, )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
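# A hedged end-to-end sketch: start the server via the transformers CLI, then hit
# the /tokenize route defined above. The model name is an arbitrary example and
# the JSON field names mirror the Body(..., embed=True) parameters.
#
#   transformers-cli serve --task feature-extraction --model bert-base-uncased --port 8888
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'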
| 538 |
"""simple docstring"""
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
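    # Worked check: 90° of a radius-10 circle is a quarter circumference,
    # 2 * pi * 10 / 4 ≈ 15.70796, and a 360° arc recovers the full circumference.
    assert abs(arc_length(360, 10) - 2 * pi * 10) < 1e-9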
| 538 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
set_seed(7_7_0)
lowerCamelCase_ = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
lowerCamelCase_ = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
lowerCamelCase_ = os.path.dirname(os.path.abspath(__file__))
lowerCamelCase_ = os.path.join(os.path.expanduser("~"), ".cache")
lowerCamelCase_ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def __lowerCamelCase ( a_ : List[Any] , a_ : List[str]=False ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE :Optional[Any] = model_type
if use_small:
key += "_small"
return os.path.join(a_ , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def __lowerCamelCase ( a_ : List[Any] , a_ : Optional[Any] ) -> List[Any]:
os.makedirs(a_ , exist_ok=a_ )
hf_hub_download(repo_id=a_ , filename=a_ , local_dir=a_ )
def __lowerCamelCase ( a_ : Tuple , a_ : int , a_ : List[str]=False , a_ : str="text" ) -> Optional[int]:
if model_type == "text":
__SCREAMING_SNAKE_CASE :Union[str, Any] = BarkSemanticModel
__SCREAMING_SNAKE_CASE :Optional[Any] = BarkSemanticConfig
__SCREAMING_SNAKE_CASE :List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
__SCREAMING_SNAKE_CASE :Union[str, Any] = BarkCoarseModel
__SCREAMING_SNAKE_CASE :Tuple = BarkCoarseConfig
__SCREAMING_SNAKE_CASE :Optional[Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
__SCREAMING_SNAKE_CASE :str = BarkFineModel
__SCREAMING_SNAKE_CASE :List[str] = BarkFineConfig
__SCREAMING_SNAKE_CASE :List[Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
__SCREAMING_SNAKE_CASE :Dict = f'''{model_type}_small''' if use_small else model_type
__SCREAMING_SNAKE_CASE :List[str] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(a_ ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
__SCREAMING_SNAKE_CASE :Dict = torch.load(a_ , map_location=a_ )
# this is a hack
__SCREAMING_SNAKE_CASE :Tuple = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
__SCREAMING_SNAKE_CASE :Optional[Any] = model_args['''vocab_size''']
__SCREAMING_SNAKE_CASE :Optional[int] = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__SCREAMING_SNAKE_CASE :Any = model_args.pop('''n_head''' )
__SCREAMING_SNAKE_CASE :List[Any] = model_args.pop('''n_embd''' )
__SCREAMING_SNAKE_CASE :Dict = model_args.pop('''n_layer''' )
__SCREAMING_SNAKE_CASE :Dict = ConfigClass(**checkpoint['''model_args'''] )
__SCREAMING_SNAKE_CASE :int = ModelClass(config=a_ )
__SCREAMING_SNAKE_CASE :List[Any] = GenerationConfigClass()
__SCREAMING_SNAKE_CASE :Any = model_generation_config
__SCREAMING_SNAKE_CASE :List[Any] = checkpoint['''model''']
# fixup checkpoint
__SCREAMING_SNAKE_CASE :Any = '''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(a_ ):
# replace part of the key with corresponding layer name in HF implementation
__SCREAMING_SNAKE_CASE :Tuple = k[len(a_ ) :]
for old_layer_name in new_layer_name_dict:
__SCREAMING_SNAKE_CASE :int = new_k.replace(a_ , new_layer_name_dict[old_layer_name] )
__SCREAMING_SNAKE_CASE :Dict = state_dict.pop(a_ )
__SCREAMING_SNAKE_CASE :Dict = set(state_dict.keys() ) - set(model.state_dict().keys() )
__SCREAMING_SNAKE_CASE :List[Any] = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
__SCREAMING_SNAKE_CASE :Any = set(model.state_dict().keys() ) - set(state_dict.keys() )
__SCREAMING_SNAKE_CASE :Optional[Any] = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(a_ ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(a_ ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(a_ , strict=a_ )
__SCREAMING_SNAKE_CASE :int = model.num_parameters(exclude_embeddings=a_ )
__SCREAMING_SNAKE_CASE :str = checkpoint['''best_val_loss'''].item()
logger.info(f'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(a_ , 3 )} loss''' )
model.eval()
model.to(a_ )
del checkpoint, state_dict
return model
def __lowerCamelCase ( a_ : Any , a_ : List[Any]=False , a_ : List[str]="text" ) -> Optional[Any]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__SCREAMING_SNAKE_CASE :int = '''cpu''' # do conversion on cpu
__SCREAMING_SNAKE_CASE :int = _get_ckpt_path(a_ , use_small=a_ )
__SCREAMING_SNAKE_CASE :int = _load_model(a_ , a_ , model_type=a_ , use_small=a_ )
# load bark initial model
__SCREAMING_SNAKE_CASE :Optional[Any] = _bark_load_model(a_ , '''cpu''' , model_type=a_ , use_small=a_ )
if model_type == "text":
__SCREAMING_SNAKE_CASE :List[Any] = bark_model['''model''']
if model.num_parameters(exclude_embeddings=a_ ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
__SCREAMING_SNAKE_CASE :Optional[Any] = 5
__SCREAMING_SNAKE_CASE :int = 10
if model_type in ["text", "coarse"]:
__SCREAMING_SNAKE_CASE :List[str] = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
__SCREAMING_SNAKE_CASE :Optional[Any] = bark_model(a_ )[0]
__SCREAMING_SNAKE_CASE :List[str] = model(a_ )
# take last logits
__SCREAMING_SNAKE_CASE :Any = output_new_model_total.logits[:, [-1], :]
else:
__SCREAMING_SNAKE_CASE :List[str] = 3
__SCREAMING_SNAKE_CASE :Any = 8
__SCREAMING_SNAKE_CASE :Any = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__SCREAMING_SNAKE_CASE :Any = model(a_ , a_ )
__SCREAMING_SNAKE_CASE :str = bark_model(a_ , a_ )
__SCREAMING_SNAKE_CASE :List[str] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
def __lowerCamelCase ( a_ : Dict , a_ : str , a_ : Optional[Any] , a_ : List[str] , a_ : Tuple , a_ : Tuple , ) -> List[str]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = os.path.join(a_ , a_ )
__SCREAMING_SNAKE_CASE :str = BarkSemanticConfig.from_pretrained(os.path.join(a_ , '''config.json''' ) )
__SCREAMING_SNAKE_CASE :Optional[Any] = BarkCoarseConfig.from_pretrained(os.path.join(a_ , '''config.json''' ) )
__SCREAMING_SNAKE_CASE :Optional[Any] = BarkFineConfig.from_pretrained(os.path.join(a_ , '''config.json''' ) )
__SCREAMING_SNAKE_CASE :Optional[int] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
__SCREAMING_SNAKE_CASE :List[str] = BarkSemanticModel.from_pretrained(a_ )
__SCREAMING_SNAKE_CASE :str = BarkCoarseModel.from_pretrained(a_ )
__SCREAMING_SNAKE_CASE :Any = BarkFineModel.from_pretrained(a_ )
__SCREAMING_SNAKE_CASE :Optional[int] = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
__SCREAMING_SNAKE_CASE :Any = BarkConfig.from_sub_model_configs(
a_ , a_ , a_ , a_ )
__SCREAMING_SNAKE_CASE :Optional[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__SCREAMING_SNAKE_CASE :Tuple = BarkModel(a_ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = semantic
__SCREAMING_SNAKE_CASE :List[str] = coarseAcoustic
__SCREAMING_SNAKE_CASE :Union[str, Any] = fineAcoustic
__SCREAMING_SNAKE_CASE :str = codec
__SCREAMING_SNAKE_CASE :Union[str, Any] = bark_generation_config
Path(a_ ).mkdir(exist_ok=a_ )
bark.save_pretrained(a_ , repo_id=a_ , push_to_hub=a_ )
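# A hedged example of invoking this conversion script (the filename
# `convert_suno_to_hf.py` and the output path are assumptions chosen to match
# the positional arguments parsed below):
#
#   python convert_suno_to_hf.py text ./bark-text --is_small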
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
lowerCamelCase_ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 498 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 498 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_efficientnet': [
        'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientNetConfig',
        'EfficientNetOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_efficientnet'] = [
        'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientNetForImageClassification',
        'EfficientNetModel',
        'EfficientNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
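# Design note: _LazyModule defers the heavy torch/vision imports until a name is
# first accessed, keeping `import transformers` fast. A simplified sketch of the
# idea (the real class lives in transformers/utils/import_utils.py; the
# `_class_to_module` lookup table mirrors its internal mapping):
#
#   import importlib, types
#
#   class LazySketch(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._class_to_module[name]  # e.g. "modeling_efficientnet"
#           module = importlib.import_module("." + submodule, self.__name__)
#           return getattr(module, name)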
| 718 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 545 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """
    This class represents a vector of arbitrary size and related operations.
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """
    This class represents an arbitrary matrix and related operations.
    """

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
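# A short usage sketch of the classes above (values chosen so the expected
# results are easy to verify by hand):
if __name__ == "__main__":
    v = Vector([1.0, 2.0, 2.0])
    w = Vector([1.0, 0.0, 0.0])
    print(v + w)  # (2.0,2.0,2.0)
    print(v * w)  # dot product -> 1.0
    print(v.euclidean_length())  # sqrt(1 + 4 + 4) = 3.0

    m = Matrix([[2, 0], [0, 2]], 2, 2)
    print(m * Vector([1.0, 3.0]))  # (2.0,6.0)
    print(m.determinant())  # 4.0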
| 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
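# Design note on the shapes asserted above: `replicate` copies the params to
# every local device and `shard` adds a leading device axis to the inputs, so
# the pmapped pipeline returns one image batch per device — hence the
# (jax.device_count(), 1, 768, 512, 3) output shape.
#
#   import jax
#   print(jax.local_device_count())  # e.g. 8 on a TPU v3-8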
| 593 | 0 |
def catalan_numbers(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
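    # First few values for reference (this function is 1-indexed, so
    # catalan_numbers(n) returns the (n-1)-th Catalan number):
    print([catalan_numbers(i) for i in range(1, 7)])  # [1, 1, 2, 5, 14, 42]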
| 707 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
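# The shim keeps old imports working while steering callers to the new class; a
# hedged sketch of what a caller sees:
#
#   from transformers import VideoMAEFeatureExtractor
#   fe = VideoMAEFeatureExtractor()  # emits the FutureWarning above, then behaves
#                                    # exactly like VideoMAEImageProcessor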
| 618 | 0 |