import math
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string with the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # Once the lexicon is full for the current code length, grow every key
        # by one leading bit before the next code is added.
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given string of 0s and 1s as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # The final element only carries the padding marker and is not written out.
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that a compressed file carries and returns the rest."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
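
# A minimal usage sketch (hypothetical file names): the input is assumed to be a
# file produced by the companion LZW compressor, which prepends a size prefix:
#
#     python lempel_ziv_decompress.py compressed.lz restored.txt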
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a ResNet model."""

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
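
# A short illustration of how this config is typically used (a sketch, not part
# of the module; `ResNetModel` is the matching model class in transformers):
#
#     from transformers import ResNetConfig, ResNetModel
#
#     configuration = ResNetConfig()      # resnet-50 style defaults
#     model = ResNetModel(configuration)  # randomly initialized weights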
def gray_code(bit_count: int) -> list:
    """Takes in an integer n and returns the n-bit Gray code sequence as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Generates the n-bit Gray code sequence as a list of bit strings, built
    recursively from the (n - 1)-bit sequence."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
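
# A quick illustration (the values follow from the recursive construction above):
#
#     >>> gray_code(2)
#     [0, 1, 3, 2]
#
# i.e. the bit strings "00", "01", "11", "10"; consecutive entries differ in
# exactly one bit, which is the defining property of a Gray code.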
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make
        # sure it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
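
# These tests are normally run with pytest from a transformers checkout, e.g.
# (a sketch; the exact path depends on the repository layout):
#
#     python -m pytest tests/trainer/test_trainer_callback.py -k test_event_flow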
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """DFS over a state space tree: each state has two children (skip or take
    the current element) and terminates at the end of the sequence."""
    if index == len(sequence):
        print(current_subsequence)
        return

    # branch 1: skip sequence[index]
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: take sequence[index], then backtrack
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
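
# For [3, 1, 2, 4] this prints all 2**4 = 16 subsequences, starting with the
# empty list [] (the skip branch is always explored first) and ending with the
# full sequence [3, 1, 2, 4].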
import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : str = len(__UpperCamelCase )
UpperCAmelCase__ : str = [[0] * n for i in range(__UpperCamelCase )]
for i in range(__UpperCamelCase ):
UpperCAmelCase__ : int = y_points[i]
for i in range(2 , __UpperCamelCase ):
for j in range(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
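
# A worked example: the points (1, 6), (2, 7), (3, 8), (4, 9), (6, 11) lie on
# the line y = x + 5, so evaluating at x0 = 5 gives
#
#     >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
#     10.0
#
# The first return value is the interpolated y at x0; the second is the full
# Neville table of intermediate estimates.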
from __future__ import annotations

import inspect
import unittest
from math import floor

import numpy as np

from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFCvtForImageClassification, TFCvtModel
    from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img() -> "Image.Image":
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
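
# A short illustration (a sketch; `Data2VecVisionModel` is the matching model
# class in transformers):
#
#     from transformers import Data2VecVisionConfig, Data2VecVisionModel
#
#     configuration = Data2VecVisionConfig()      # data2vec-vision-base style defaults
#     model = Data2VecVisionModel(configuration)  # randomly initialized weights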
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial-division primality test up to isqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Counts the primes below max_prime that are a difference of two consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1 + 1)**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
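
# A short illustration (a sketch; `SegformerModel` is the matching model class
# in transformers):
#
#     from transformers import SegformerConfig, SegformerModel
#
#     configuration = SegformerConfig()      # segformer-b0 style defaults
#     model = SegformerModel(configuration)  # randomly initialized weights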
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    sa = make_github_table(reduced_by_error)
    sa_per_model = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa_per_model)
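    # Hypothetical invocation sketch (our addition; the script name is assumed):
    #   python get_ci_error_statistics.py --workflow_run_id 123456789 \
    #       --output_dir ci_errors --token <token with actions:read>
    # It would leave job_links.json, artifacts.json, errors.json and the two
    # markdown tables (reduced_by_error.txt, reduced_by_model.txt) in ci_errors/.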
| 38 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_( state_dict : List[str] ) -> Union[str, Any]:
    ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb : Union[str, Any] ) -> List[Any]:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk( checkpoint_path : List[str] ) -> List[Any]:
    mam_aaa = torch.load(checkpoint_path , map_location="cpu" )
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
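    # Hedged usage sketch (our addition; the script and folder names are assumed):
    #   python convert_m2m100_checkpoint.py /path/to/fairseq/model.pt ./m2m100-converted
    # The dump can then be reloaded with
    #   M2M100ForConditionalGeneration.from_pretrained("./m2m100-converted")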
| 69 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    '''simple docstring'''

    dummy_file_name = '''dummy_data'''
    datasets_scripts_dir = '''datasets'''
    is_streaming = False
    def __init__( self , dataset_name , config , version , cache_dir = None , use_local_dummy_data = False , load_existing_dummy_data = True , download_callbacks = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file( self ):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder( self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("""dummy""" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("""dummy""" , self.version_name )

    @property
    def dummy_zip_file( self ):
        return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )

    def download_dummy_data( self ):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )

    @property
    def local_path_to_dummy_data( self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def github_path_to_dummy_data( self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
        return self._bucket_url

    @property
    def manual_dir( self ):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
    def download_and_extract( self , data_url , *args ):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )
    def download( self , data_url , *args ):
        return self.download_and_extract(data_url )

    def download_custom( self , data_url , custom_download ):
        return self.download_and_extract(data_url )

    def extract( self , path , *args , **kwargs ):
        return path

    def get_recorded_sizes_checksums( self ):
        return {}
    def create_dummy_data_dict( self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list( self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single( self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files( self ):
        pass

    def manage_extracted_files( self ):
        pass
    def iter_archive( self , path ):
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )

        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob("""*""" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
                yield file_path.relative_to(path ).as_posix(), file_path.open("""rb""" )
    def iter_files( self , paths ):
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith((""".""", """__""") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith((""".""", """__""") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith((""".""", """__""") ):
                            continue
                        yield os.path.join(dirpath , filename )
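# Minimal usage sketch (our addition; the dataset name and URL are illustrative):
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
#   files = dl_manager.download_and_extract({"train": "https://example.com/train.json"})
# `files["train"]` then points inside the extracted dummy_data.zip contents.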
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ["CLIPFeatureExtractor"]
lowerCamelCase : List[str] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
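# Hedged usage note (our addition): with the lazy module in place, an import like
#   from transformers.models.clip import CLIPTokenizer
# resolves the `tokenization_clip` submodule on first access rather than at
# package import time.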
| 70 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 38 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = """RegNetConfig"""

# Base docstring
_CHECKPOINT_FOR_DOC = """facebook/regnet-y-040"""
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """facebook/regnet-y-040"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__( self ,out_channels ,kernel_size = 3 ,stride = 1 ,groups = 1 ,activation = "relu" ,**kwargs ,):
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels ,kernel_size=kernel_size ,strides=stride ,padding="VALID" ,groups=groups ,use_bias=False ,name="convolution" ,)
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call( self ,hidden_state ):
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__( self ,config ,**kwargs ):
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,)

    def call( self ,pixel_values ):
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values ,perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__( self ,out_channels ,stride = 2 ,**kwargs ):
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels ,kernel_size=1 ,strides=stride ,use_bias=False ,name="convolution" )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )

    def call( self ,inputs ,training = False ):
        return self.normalization(self.convolution(inputs ) ,training=training )
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__( self ,in_channels ,reduced_channels ,**kwargs ):
        super().__init__(**kwargs )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True ,name="pooler" )
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels ,kernel_size=1 ,activation="relu" ,name="attention.0" ),
            tf.keras.layers.Conv2D(filters=in_channels ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ),
        ]

    def call( self ,hidden_state ):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state )
        for layer_module in self.attention:
            pooled = layer_module(pooled )
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__( self ,config ,in_channels ,out_channels ,stride = 1 ,**kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 ,out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels ,stride=stride ,name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" ,name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
            TFRegNetConvLayer(
                out_channels ,stride=stride ,groups=groups ,activation=config.hidden_act ,name="layer.1" ),
            TFRegNetConvLayer(out_channels ,kernel_size=1 ,activation=None ,name="layer.2" ),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call( self ,hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__( self ,config ,in_channels ,out_channels ,stride = 1 ,**kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 ,out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels ,stride=stride ,name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" ,name="shortcut" )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
            TFRegNetConvLayer(
                out_channels ,stride=stride ,groups=groups ,activation=config.hidden_act ,name="layer.1" ),
            TFRegNetSELayer(out_channels ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ),
            TFRegNetConvLayer(out_channels ,kernel_size=1 ,activation=None ,name="layer.3" ),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call( self ,hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__( self ,config ,in_channels ,out_channels ,stride = 2 ,depth = 2 ,**kwargs ):
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config ,in_channels ,out_channels ,stride=stride ,name="layers.0" ),
            *[layer(config ,out_channels ,out_channels ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
        ]

    def call( self ,hidden_state ):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__( self ,config ,**kwargs ):
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) )
        in_out_channels = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels ,config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config ,in_channels ,out_channels ,depth=depth ,name=f'''stages.{i+1}''' ) )

    def call( self ,hidden_state ,output_hidden_states = False ,return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state ,hidden_states=hidden_states )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__( self ,config ,**kwargs ):
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config ,name="embedder" )
        self.encoder = TFRegNetEncoder(config ,name="encoder" )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True ,name="pooler" )

    @unpack_inputs
    def call( self ,pixel_values ,output_hidden_states = None ,return_dict = None ,training = False ,):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values ,training=training )
        encoder_outputs = self.encoder(
            embedding_output ,output_hidden_states=output_hidden_states ,return_dict=return_dict ,training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output ,perm=(0, 3, 1, 2) )
        last_hidden_state = tf.transpose(last_hidden_state ,perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state ,pooler_output=pooled_output ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature( self ):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.float32 )}
_lowerCamelCase = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCamelCase = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , REGNET_START_DOCSTRING , )
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__( self ,config ,*inputs ,**kwargs ):
        super().__init__(config ,*inputs ,**kwargs )
        self.regnet = TFRegNetMainLayer(config ,name="regnet" )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=TFBaseModelOutputWithPoolingAndNoAttention ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def call( self ,pixel_values ,output_hidden_states = None ,return_dict = None ,training=False ,):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values ,output_hidden_states=output_hidden_states ,return_dict=return_dict ,training=training ,)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel , TFSequenceClassificationLoss):
    def __init__( self ,config ,*inputs ,**kwargs ):
        super().__init__(config ,*inputs ,**kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config ,name="regnet" )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=TFSequenceClassifierOutput ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def call( self ,pixel_values = None ,labels = None ,output_hidden_states = None ,return_dict = None ,training=False ,):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values ,output_hidden_states=output_hidden_states ,return_dict=return_dict ,training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        loss = None if labels is None else self.hf_compute_loss(labels=labels ,logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss ,logits=logits ,hidden_states=outputs.hidden_states )
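# Hedged usage sketch (our addition, assuming the hub checkpoint is available):
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   logits = model(pixel_values=tf.zeros((1, 3, 224, 224))).logits  # shape (1, num_labels)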
| 71 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field( input_text : List[Any] , convert_value : List[Any]=None , default : List[str]=None , error_message : List[str]=None ) -> Dict:
    '''simple docstring'''
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options( input_text : List[str] , options : Any=[] , convert_value : Optional[int]=None , default_choice : int=0 ) -> Optional[int]:
    '''simple docstring'''
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment( value : Any ) -> int:
    '''simple docstring'''
    value = int(value )
    return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def _convert_distributed_mode( value : str ) -> Tuple:
    '''simple docstring'''
    value = int(value )
    return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def _convert_dynamo_backend( value : List[str] ) -> List[Any]:
    '''simple docstring'''
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision( value : List[str] ) -> Union[str, Any]:
    '''simple docstring'''
    value = int(value )
    return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def _convert_sagemaker_distributed_mode( value : Optional[int] ) -> List[Any]:
    '''simple docstring'''
    value = int(value )
    return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def _convert_yes_no_to_bool( value : Dict ) -> Tuple:
    '''simple docstring'''
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter( argparse.RawDescriptionHelpFormatter ):
    '''simple docstring'''

    def _format_usage( self , usage , actions , groups , prefix ):
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("""<command> [<args>] """ , """""" )
        return usage
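# Hedged examples (our addition) of how the converters map menu indices to enums:
#   _convert_compute_environment(0)  -> ComputeEnvironment.LOCAL_MACHINE
#   _convert_mixed_precision(1)      -> PrecisionType.FP16
#   _convert_yes_no_to_bool("Yes")   -> True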
| 38 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm :
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table , ):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
    def __available_resources( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
    def __need( self ):
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ):
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self , **kwargs ):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.' )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('''The process is in a safe state.\n''' )
            else:
                print('''System in unsafe state. Aborting...\n''' )
                break
    def __pretty_data( self ):
        print(''' ''' * 9 + '''Allocated Resource Table''' )
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item ) + 1}'
                + ''' '''.join(f'{it:>8}' for it in item )
                + '''\n''' )
        print(''' ''' * 9 + '''System Resource Table''' )
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item ) + 1}'
                + ''' '''.join(f'{it:>8}' for it in item )
                + '''\n''' )
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(x ) for x in self.__claim_vector ) )
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
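    # Hedged usage example (our addition): run the safety check on the
    # module-level example tables with verbose output.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)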
| 72 |
'''simple docstring'''
from __future__ import annotations
def average( nums : list ) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
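    # Hedged example (our addition): averaging a small list.
    assert average([2, 4, 6]) == 4.0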
| 38 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'vinai/phobert-base': 2_56,
'vinai/phobert-large': 2_56,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
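# Hedged example (our addition): for the BPE word ("l", "o", "w</w>"),
# get_pairs returns {("l", "o"), ("o", "w</w>")}.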
class PhobertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , a , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , **a , ) -> Dict:
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = merges_file
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 3
self.add_from_file(a)
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
with open(a , encoding='utf-8') as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split('\n')[:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split()[:-1]) for merge in merges]
SCREAMING_SNAKE_CASE = dict(zip(a , range(len(a))))
SCREAMING_SNAKE_CASE = {}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size( self) -> int:
        return len(self.encoder)

    def get_vocab( self) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self , token) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text) -> List[str]:
        split_tokens = []
        words = re.findall(R'\S+\n?' , text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id( self , token) -> Any:
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token( self , index) -> List[str]:
        return self.decoder.get(index , self.unk_token)
    def convert_tokens_to_string( self , tokens) -> Tuple:
        out_string = ' '.join(tokens).replace('@@ ' , '').strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file , out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file( self , f) -> None:
        if isinstance(f , str):
            try:
                with open(f , 'r' , encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''')
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(' ')
            if idx == -1:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'')
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
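# Hedged usage sketch (our addition, assuming network access to the hub):
#   tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#   pieces = tokenizer.tokenize("Xin chao the gioi")  # BPE pieces, possibly with "@@" continuations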
| 73 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
    '''simple docstring'''

    def __init__( self , domain ):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag( self , tag , attrs ):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name( url : str ) -> str:
    '''simple docstring'''
    return ".".join(get_sub_domain_name(url ).split(""".""" )[-2:] )


def get_sub_domain_name( url : str ) -> str:
    '''simple docstring'''
    return parse.urlparse(url ).netloc
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74 |
'''simple docstring'''
def is_palindrome( head : List[Any] ) -> Tuple:
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack( head : Any ) -> Optional[Any]:
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict( head : Optional[Any] ) -> Tuple:
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
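# Minimal driver sketch (our addition; the original snippet assumes a singly
# linked list node with `val`/`next` attributes, which is not defined here):
class _Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


if __name__ == "__main__":
    # build 1 -> 2 -> 2 -> 1 and check it with all three variants
    head = _Node(1, _Node(2, _Node(2, _Node(1))))
    assert is_palindrome_stack(head)
    assert is_palindrome_dict(head)
    assert is_palindrome(head)  # note: this variant mutates the list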
| 38 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 1_2_8_0_2_2
FR_CODE = 1_2_8_0_2_8
@require_sentencepiece
class M2M100TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return M2M100Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = '''</s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<s>''' )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
    @unittest.skip('''Skip this test while all models are still to be uploaded.''' )
    def test_pretrained_model_lists( self ):
        '''simple docstring'''
        pass
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        text = tokenizer.convert_tokens_to_string(back_tokens )
        self.assertEqual(text , '''This is a test''' )
@slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
        # fmt: off
UpperCAmelCase__ : Tuple = {'''input_ids''': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
lowerCAmelCase__ = 'facebook/m2m100_418M'
lowerCAmelCase__ = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
lowerCAmelCase__ = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
lowerCAmelCase__ = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def lowercase_ ( cls : Union[str, Any] ):
'''simple docstring'''
cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
cls.pad_token_id = 1
return cls
def lowercase_ ( self : Dict ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128_063 )
def lowercase_ ( self : Any ):
'''simple docstring'''
vocab = self.tokenizer.get_vocab()
self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , vocab )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
self.tokenizer.src_lang = '''en'''
ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , ids )
def lowercase_ ( self : str ):
'''simple docstring'''
self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
generated_ids = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
self.assertEqual(result , expected_french )
self.assertNotIn(self.tokenizer.eos_token , result )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
tmpdirname = tempfile.mkdtemp()
original_lang_token_to_id = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(tmpdirname )
new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
def lowercase_ ( self : Any ):
'''simple docstring'''
self.tokenizer.src_lang = '''en'''
self.tokenizer.tgt_lang = '''fr'''
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
batch['''decoder_input_ids'''] = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowercase_ ( self : Any ):
'''simple docstring'''
self.tokenizer.src_lang = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer.src_lang = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowercase_ ( self : Any ):
'''simple docstring'''
self.tokenizer.tgt_lang = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
self.tokenizer.tgt_lang = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowercase_ ( self : str ):
'''simple docstring'''
inputs = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128_022, 58, 4_183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128_006,
} , )
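# --- Illustrative sketch, not part of the test file above. The checkpoint and
# --- language codes mirror the ones exercised by the tests; everything else is
# --- an assumption. The translation inputs built by the tokenizer are normally
# --- consumed by `generate`, where `forced_bos_token_id` pins the first decoded
# --- token to the target-language code (128_006 for Arabic above).
def _example_forced_bos_generation():
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
tokenizer = M2M100Tokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''en''' )
model = M2M100ForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' )
inputs = tokenizer('''A test''' , return_tensors='''pt''' )
# force the decoder to start with the Arabic language code
generated = model.generate(**inputs , forced_bos_token_id=tokenizer.get_lang_id('''ar''' ) )
return tokenizer.batch_decode(generated , skip_special_tokens=True )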
| 75 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokens = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp , key ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(tmpdirname2 )
# Save tokenizer rust, legacy_format=True
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
# Checks it save with the same files
self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp , key ) )
shutil.rmtree(tmpdirname2 )
# Save tokenizer rust, legacy_format=False
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp , key ) )
shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
cls.tokenizer : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
cls.pad_token_id = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , ids )
def __UpperCamelCase ( self ):
self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
generated_ids = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
self.assertEqual(result , expected_romanian )
self.assertNotIn(self.tokenizer.eos_token , result )
def __UpperCamelCase ( self ):
src_text = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , str )
desired_max_length = 1_0
ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , EN_CODE )
self.assertEqual(len(ids ) , desired_max_length )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname )
new_tok = MBartTokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def __UpperCamelCase ( self ):
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
batch = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
targets = self.tokenizer(
text_target=self.tgt_text , padding=True , truncation=True , max_length=1_0 , return_tensors="""pt""" )
labels = targets["""input_ids"""]
batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
inputs = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
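# --- Illustrative reference sketch, written from the behaviour asserted above
# --- rather than copied from the library: MBart's shift_tokens_right rotates
# --- the last non-pad token in each row (the language code) to position 0, so
# --- decoder inputs start with the language id and labels end with it.
def _example_shift_tokens_right(input_ids , pad_token_id ):
import torch
prev_output_tokens = input_ids.clone()
# index of the last non-pad token (the language code) in each row
index_of_eos = (prev_output_tokens.ne(pad_token_id ).sum(dim=1 ) - 1).unsqueeze(-1 )
decoder_start_tokens = prev_output_tokens.gather(1 , index_of_eos ).squeeze(-1 )
prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
prev_output_tokens[:, 0] = decoder_start_tokens
return prev_output_tokens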
| 38 | 0 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCAmelCase_ ( snake_case ):
def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
if tokenize_kwargs is None:
tokenize_kwargs = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
tokenize_kwargs['''truncation'''] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if return_tensors is not None:
postprocess_params['''return_tensors'''] = return_tensors
return preprocess_params, {}, postprocess_params
def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
model_inputs = self.tokenizer(inputs , return_tensors=self.framework , **tokenize_kwargs )
return model_inputs
def _forward( self , model_inputs ):
model_outputs = self.model(**model_inputs )
return model_outputs
def postprocess( self , model_outputs , return_tensors=False ):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *args , **kwargs ):
return super().__call__(*args , **kwargs )
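# --- Minimal usage sketch (the checkpoint name is an assumption for
# --- illustration; the pipeline class above does not fix one): the pipeline
# --- tokenizes the text, runs the model, and postprocesses the first output
# --- tensor into nested Python lists.
def _example_feature_extraction():
from transformers import pipeline
extractor = pipeline(task='''feature-extraction''' , model='''distilbert-base-uncased''' )
features = extractor('''This is a test''' )
# features is nested as [batch][sequence][hidden_size]
return len(features[0] ), len(features[0][0] )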
| 76 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=3_2 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=3_2 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
super().__init__(**kwargs )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
global_padding = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.layer_type = layer_type
self.hidden_act = hidden_act
self.global_padding = global_padding
self.num_groups = num_groups
self.drop_path_rate = drop_path_rate
self.embedding_dynamic_padding = embedding_dynamic_padding
self.output_stride = output_stride
self.width_factor = width_factor
self.stage_names = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
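# --- Small usage sketch, assuming the class above mirrors transformers'
# --- public BitConfig (an assumption; the demo imports the public class):
# --- out_indices is derived from out_features against the stage names built
# --- in __init__.
def _example_bit_config():
from transformers import BitConfig
config = BitConfig(layer_type="preactivation" , out_features=["stage3"] )
# stage_names is ["stem", "stage1", ..., "stage4"] for the default depths
return config.stage_names, config.out_features, config.out_indices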
| 38 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class a__ ( __magic_name__ ):
lowercase_ = ["input_features", "is_longer"]
def __init__( self , feature_size=64 , sampling_rate=48000 , hop_length=480 , max_length_s=10 , fft_window_size=1024 , padding_value=0.0 , return_attention_mask=False , frequency_min: float = 0 , frequency_max: float = 14000 , top_db: int = None , truncation: str = "fusion" , padding: str = "repeatpad" , **kwargs , ):
"""simple docstring"""
super().__init__(
feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
self.top_db = top_db
self.truncation = truncation
self.padding = padding
self.fft_window_size = fft_window_size
self.nb_frequency_bins = (fft_window_size >> 1) + 1
self.hop_length = hop_length
self.max_length_s = max_length_s
self.nb_max_samples = max_length_s * sampling_rate
self.sampling_rate = sampling_rate
self.frequency_min = frequency_min
self.frequency_max = frequency_max
self.mel_filters = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale="htk" , )
self.mel_filters_slaney = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , )
def to_dict( self ) -> Dict[str, Any]:
"""simple docstring"""
output = copy.deepcopy(self.__dict__)
output["feature_extractor_type"] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _np_extract_fbank_features( self , waveform: np.array , mel_filters: Optional[np.array] = None):
"""simple docstring"""
log_mel_spectrogram = spectrogram(
waveform , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel="dB" , )
return log_mel_spectrogram.T
def _random_mel_fusion( self , mel , total_frames , chunk_frames):
"""simple docstring"""
ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
ranges[1] = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
ranges[2] = [0]
# randomly choose index for each part
idx_front = np.random.choice(ranges[0])
idx_middle = np.random.choice(ranges[1])
idx_back = np.random.choice(ranges[2])
mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
mel_shrink = torch.tensor(mel[None, None, :])
mel_shrink = torch.nn.functional.interpolate(
mel_shrink , size=[chunk_frames, 64] , mode="bilinear" , align_corners=False)
mel_shrink = mel_shrink[0][0].numpy()
mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def _get_input_mel( self , waveform: np.array , max_length , truncation , padding):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
longer = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
overflow = len(waveform) - max_length
idx = np.random.randint(0 , overflow + 1)
waveform = waveform[idx : idx + max_length]
input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
mel = self._np_extract_fbank_features(waveform , self.mel_filters)
chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
total_frames = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
input_mel = np.stack([mel, mel, mel, mel] , axis=0)
longer = False
else:
input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames)
longer = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented")
else:
longer = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
n_repeat = int(max_length / len(waveform))
waveform = np.stack(np.tile(waveform , n_repeat + 1))[:max_length]
if padding == "repeatpad":
n_repeat = int(max_length / len(waveform))
waveform = np.stack(np.tile(waveform , n_repeat))
waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0)
if truncation == "fusion":
input_mel = self._np_extract_fbank_features(waveform , self.mel_filters)
input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: str = None , padding: Optional[str] = None , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
"""simple docstring"""
truncation = truncation if truncation is not None else self.truncation
padding = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
is_batched_numpy = isinstance(raw_speech , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
is_batched = is_batched_numpy or (
isinstance(raw_speech , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
raw_speech = [np.asarray(speech , dtype=np.float32) for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech , np.ndarray):
raw_speech = np.asarray(raw_speech , dtype=np.float32)
elif isinstance(raw_speech , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
raw_speech = [np.asarray(raw_speech)]
# convert to mel spectrogram, truncate and pad if needed.
padded_inputs = [
self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding)
for waveform in raw_speech
]
input_mel = []
is_longer = []
for mel, longer in padded_inputs:
input_mel.append(mel)
is_longer.append(longer)
if truncation == "fusion" and sum(is_longer) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
rand_idx = np.random.randint(0 , len(input_mel))
is_longer[rand_idx] = True
if isinstance(input_mel[0] , list):
input_mel = [np.asarray(feature , dtype=np.float32) for feature in input_mel]
# is_longer is a list of bool
is_longer = [[longer] for longer in is_longer]
input_data = {"input_features": input_mel, "is_longer": is_longer}
input_features = BatchFeature(input_data)
if return_tensors is not None:
input_features = input_features.convert_to_tensors(return_tensors)
return input_features
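# --- Minimal usage sketch (an illustrative assumption; any mono 48 kHz
# --- waveform works): short inputs are repeat-padded and, with the default
# --- truncation="fusion", stacked into four identical mel views.
def _example_clap_features():
import numpy as np
from transformers import ClapFeatureExtractor
extractor = ClapFeatureExtractor()
waveform = np.zeros(48_000 , dtype=np.float32)  # one second of silence
features = extractor(waveform , sampling_rate=48_000 , return_tensors="np")
# roughly (batch, 4, frames, 64) for fusion; is_longer is [[False]] here
return features["input_features"].shape, features["is_longer"]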
| 77 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
'''simple docstring'''
rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
prefix = """"""
else:
prefix = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
: config.hidden_size, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
'''simple docstring'''
ignore_keys = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(k , None )
def rename_key(dct , old , new ):
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def prepare_img():
'''simple docstring'''
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
backbone_config = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=True , )
config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
base_model = False
# load original model from timm
timm_model = timm.create_model(vit_name , pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = timm_model.state_dict()
if base_model:
remove_classification_head_(state_dict )
rename_keys = create_rename_keys(config , base_model )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config , base_model )
repo_id = """huggingface/label-files"""
filename = """imagenet-1k-id2label.json"""
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
model = ViTHybridModel(config ).eval()
else:
model = ViTHybridForImageClassification(config ).eval()
model.load_state_dict(state_dict )
# create image processor
transform = create_transform(**resolve_data_config({} , model=timm_model ) )
timm_transforms = transform.transforms
pillow_resamplings = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
processor = ViTHybridImageProcessor(
do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
image = prepare_img()
timm_pixel_values = transform(image ).unsqueeze(0 )
pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(timm_pixel_values , pixel_values )
# verify logits
with torch.no_grad():
outputs = model(pixel_values )
logits = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
timm_pooled_output = timm_model.forward_features(pixel_values )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
else:
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(pytorch_dump_folder_path )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
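# --- Example invocation of the conversion script above (the script filename
# --- and the output path are assumptions; the --vit_name value matches the
# --- argparse default):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base \
#       --push_to_hub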
| 38 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __A ( UpperCamelCase__ ):
a__ : str = """gpt_neo"""
a__ : Union[str, Any] = ["""past_key_values"""]
a__ : List[str] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__(self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
UpperCAmelCase_ = vocab_size
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_heads = num_heads
self.intermediate_size = intermediate_size
self.window_size = window_size
self.activation_function = activation_function
self.resid_dropout = resid_dropout
self.embed_dropout = embed_dropout
self.attention_dropout = attention_dropout
self.classifier_dropout = classifier_dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.attention_types = attention_types
self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@staticmethod
def expand_attention_types_params(attention_types ):
attentions = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def custom_unfold(input , dimension , size , step ):
'''simple docstring'''
import torch
shape = input.size()
rank = len(shape )
sizedim = shape[dimension]
low_indices = torch.arange(0 , sizedim , step )
min_length = torch.div(sizedim - size , step , rounding_mode="floor" ) + 1
indices = torch.arange(size ) + low_indices[:min_length][:, None]
s = [slice(None )] * rank
s[dimension] = indices
sliced = input[s]
perm = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(perm )
def custom_get_block_length_and_num_windows(sequence_length , window_size ):
'''simple docstring'''
import torch
candidates = torch.arange(1 , window_size )
remainders = torch.remainder(sequence_length , candidates )
divisor_indices = remainders == 0
divisors = candidates[divisor_indices]
largest_divisor = torch.max(divisors )
return largest_divisor, torch.div(sequence_length , largest_divisor , rounding_mode="floor" )
class __A ( UpperCamelCase__ ):
@property
def inputs(self ) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction="inputs" )
common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
else:
common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def num_attention_heads(self ) -> int:
return self._config.num_heads
def generate_dummy_inputs(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
# We need to order the input in the way they appears in the forward()
ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
batch, seqlen = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
past_shape = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
ordered_inputs["past_key_values"] = [
(torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
]
ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
if self.use_past:
mask_dtype = ordered_inputs["attention_mask"].dtype
ordered_inputs["attention_mask"] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
@property
def default_onnx_opset(self ) -> int:
return 13
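# --- Worked example of expand_attention_types_params (values mirror the
# --- defaults above): [[["global", "local"], 12]] expands to 24 alternating
# --- layer types, matching num_layers=24.
def _example_expand_attention_types():
pattern = [[["global", "local"], 12]]
attentions = []
for item in pattern:
for _ in range(item[1] ):
attentions.extend(item[0] )
assert len(attentions ) == 24
assert attentions[:4] == ["global", "local", "global", "local"]
return attentions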
| 78 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Convad(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Convad(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
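# Hedged editor sketch (not part of the original file): torch.utils.checkpoint.checkpoint
# expects a plain callable, which is why the decoder above wraps each block in
# create_custom_forward. The standalone helper below reproduces that pattern in
# isolation; all names here are illustrative only.
def _gradient_checkpoint_demo():
    import torch

    def create_custom_forward(module):
        def custom_forward(*inputs):
            return module(*inputs)

        return custom_forward

    layer = torch.nn.Linear(4, 4)
    x = torch.randn(2, 4, requires_grad=True)
    # Activations of `layer` are recomputed during backward instead of stored.
    out = torch.utils.checkpoint.checkpoint(create_custom_forward(layer), x)
    out.sum().backward()
    return x.grad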
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
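# Hedged editor sketch (illustrative, not from the original file): the quantizer
# above is a nearest-neighbour lookup into a learned codebook followed by the
# straight-through trick z + (z_q - z).detach(), which routes gradients from the
# quantized output back to the encoder.
def _vector_quantize_demo():
    import torch

    codebook = torch.nn.Embedding(8, 4)        # n_e = 8 codes of dimension 4
    z = torch.randn(2, 4, requires_grad=True)  # already-flattened latents
    indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
    z_q = codebook(indices)
    z_q = z + (z_q - z).detach()               # straight-through estimator
    z_q.sum().backward()                       # gradients flow into z
    return indices, z.grad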
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
# make sure sample is on the same device as the parameters and has same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
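# Hedged editor sketch (illustrative): the sampling method above uses the
# reparameterization trick x = mean + std * eps with eps ~ N(0, 1), so the
# sample stays differentiable with respect to the predicted mean and logvar.
def _reparameterization_demo():
    import torch

    mean = torch.zeros(2, 4, requires_grad=True)
    logvar = torch.zeros(2, 4, requires_grad=True)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn_like(std)  # differentiable sample
    sample.sum().backward()
    return mean.grad, logvar.grad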
| 38 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase="shi-labs/oneformer_demo" ) -> str:
'''simple docstring'''
with open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) as f:
UpperCAmelCase__ : int = json.load(__lowerCamelCase )
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : int = []
for key, info in class_info.items():
UpperCAmelCase__ : int = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(__lowerCamelCase ) )
UpperCAmelCase__ : List[str] = thing_ids
UpperCAmelCase__ : Any = class_names
return metadata
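# Hedged editor sketch: prepare_metadata expects the downloaded JSON to map
# class ids to dicts with "name" and "isthing" keys. The labels below are made
# up; the real contents come from the shi-labs/oneformer_demo dataset. This
# mirrors the loop above with descriptive variable names.
def _prepare_metadata_demo():
    class_info = {
        "0": {"name": "wall", "isthing": 0},
        "1": {"name": "person", "isthing": 1},
    }
    metadata, thing_ids, class_names = {}, [], []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata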
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=10 , _lowerCAmelCase=False , _lowerCAmelCase=255 , _lowerCAmelCase="shi-labs/oneformer_demo" , _lowerCAmelCase="ade20k_panoptic.json" , _lowerCAmelCase=10 , ):
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : str = min_resolution
UpperCAmelCase__ : Dict = max_resolution
UpperCAmelCase__ : List[str] = do_resize
UpperCAmelCase__ : Union[str, Any] = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
UpperCAmelCase__ : str = do_normalize
UpperCAmelCase__ : Optional[Any] = image_mean
UpperCAmelCase__ : Any = image_std
UpperCAmelCase__ : str = class_info_file
UpperCAmelCase__ : Tuple = prepare_metadata(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Dict = num_text
UpperCAmelCase__ : Dict = repo_path
# for the post_process_functions
UpperCAmelCase__ : Union[str, Any] = 2
UpperCAmelCase__ : int = 10
UpperCAmelCase__ : int = 10
UpperCAmelCase__ : int = 3
UpperCAmelCase__ : int = 4
UpperCAmelCase__ : Any = num_labels
UpperCAmelCase__ : List[Any] = do_reduce_labels
UpperCAmelCase__ : int = ignore_index
def __UpperCAmelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=False ):
if not batched:
UpperCAmelCase__ : Any = image_inputs[0]
if isinstance(_lowerCAmelCase , Image.Image ):
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ : Any = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : Dict = int(self.size["""shortest_edge"""] * h / w )
UpperCAmelCase__ : Tuple = self.size["""shortest_edge"""]
elif w > h:
UpperCAmelCase__ : Dict = self.size["""shortest_edge"""]
UpperCAmelCase__ : Optional[int] = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""]
UpperCAmelCase__ : Optional[Any] = self.size["""shortest_edge"""]
else:
UpperCAmelCase__ : int = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ : Optional[Any] = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0]
UpperCAmelCase__ : Dict = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
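    # Worked example (editor sketch): with size = {"shortest_edge": 32} and a
    # single 300x400 (h x w) image, w > h, so expected_height = 32 and
    # expected_width = int(32 * 400 / 300) = 42 -- the short side is pinned to
    # shortest_edge and the long side scales to preserve the aspect ratio.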
def __UpperCAmelCase ( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__lowerCamelCase = image_processing_class
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = OneFormerImageProcessorTester(self )
@property
def __UpperCAmelCase ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """ignore_index""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """class_info_file""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """num_text""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """repo_path""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """metadata""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_reduce_labels""" ) )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
UpperCAmelCase__ : str = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self ):
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self ):
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase="np" ):
UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.num_labels
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase )
if with_segmentation_maps:
UpperCAmelCase__ : int = num_labels
if is_instance_map:
UpperCAmelCase__ : Optional[int] = list(range(_lowerCAmelCase ) ) * 2
UpperCAmelCase__ : Dict = dict(enumerate(_lowerCAmelCase ) )
UpperCAmelCase__ : int = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase__ : List[str] = [Image.fromarray(_lowerCAmelCase ) for annotation in annotations]
UpperCAmelCase__ : List[Any] = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , _lowerCAmelCase , return_tensors="""pt""" , instance_id_to_semantic_id=_lowerCAmelCase , pad_and_return_pixel_mask=_lowerCAmelCase , )
return inputs
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
def common(_lowerCAmelCase=False , _lowerCAmelCase=None ):
UpperCAmelCase__ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_lowerCAmelCase , is_instance_map=_lowerCAmelCase , segmentation_type=_lowerCAmelCase )
UpperCAmelCase__ : Any = inputs["""mask_labels"""]
UpperCAmelCase__ : Optional[Any] = inputs["""class_labels"""]
UpperCAmelCase__ : Any = inputs["""pixel_values"""]
UpperCAmelCase__ : Any = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_lowerCAmelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_lowerCAmelCase )
common(is_instance_map=_lowerCAmelCase , segmentation_type="""pil""" )
common(is_instance_map=_lowerCAmelCase , segmentation_type="""pil""" )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = np.zeros((20, 50) )
# place runs of 1s so the first run starts at flat index 21 (1-indexed)
# and spans 45 pixels, matching the assertions below
UpperCAmelCase__[0, 20:] = 1
UpperCAmelCase__[1, :15] = 1
UpperCAmelCase__[5, :10] = 1
UpperCAmelCase__ : str = binary_mask_to_rle(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
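    # Hedged reading (editor note): binary_mask_to_rle emits (start, run-length)
    # pairs over the flattened mask with 1-indexed starts, so rle[0] == 21 means
    # the first run of 1s begins at flat index 21 and rle[1] == 45 means that
    # run covers 45 pixels; len(rle) == 4 implies exactly two runs of 1s.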
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ : str = feature_extractor.post_process_semantic_segmentation(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase__ : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
UpperCAmelCase__ : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(_lowerCAmelCase , target_sizes=_lowerCAmelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ : str = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ : Optional[int] = image_processor.post_process_instance_segmentation(_lowerCAmelCase , threshold=0 )
self.assertTrue(len(_lowerCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , _lowerCAmelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ : List[Any] = image_processor.post_process_panoptic_segmentation(_lowerCAmelCase , threshold=0 )
self.assertTrue(len(_lowerCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , _lowerCAmelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 79 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
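    # Hedged follow-up (editor sketch, not part of the original test): predicted
    # depth is typically resized to the input resolution and rescaled to 8-bit
    # for visualisation, along the lines of:
    #
    #   prediction = torch.nn.functional.interpolate(
    #       predicted_depth.unsqueeze(1), size=image.size[::-1],
    #       mode="bicubic", align_corners=False,
    #   ).squeeze()
    #   depth_map = (prediction / prediction.max() * 255).cpu().numpy().astype("uint8")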
| 38 | 0 |
class __UpperCamelCase :
def __init__( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = name
__lowercase = val
def __str__( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self : Optional[int] , _lowerCAmelCase : int ) -> Any:
"""simple docstring"""
return self.val < other.val
class __UpperCamelCase :
def __init__( self : Union[str, Any] , _lowerCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = {}
__lowercase = {}
__lowercase = self.build_heap(_lowerCAmelCase )
def __getitem__( self : List[Any] , _lowerCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
return self.get_value(_lowerCAmelCase )
def _a ( self : str , _lowerCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return (idx - 1) // 2
def _a ( self : Optional[Any] , _lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
return idx * 2 + 1
def _a ( self : List[Any] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return idx * 2 + 2
def _a ( self : int , _lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
return self.heap_dict[key]
def _a ( self : Optional[Any] , _lowerCAmelCase : Any ) -> Any:
"""simple docstring"""
__lowercase = len(_lowerCAmelCase ) - 1
__lowercase = self.get_parent_idx(_lowerCAmelCase )
for idx, i in enumerate(_lowerCAmelCase ):
__lowercase = idx
__lowercase = i.val
for i in range(_lowerCAmelCase , -1 , -1 ):
self.sift_down(_lowerCAmelCase , _lowerCAmelCase )
return array
def _a ( self : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
while True:
__lowercase = self.get_left_child_idx(_lowerCAmelCase ) # noqa: E741
__lowercase = self.get_right_child_idx(_lowerCAmelCase )
__lowercase = idx
if l < len(_lowerCAmelCase ) and array[l] < array[idx]:
__lowercase = l
if r < len(_lowerCAmelCase ) and array[r] < array[smallest]:
__lowercase = r
if smallest != idx:
__lowercase , __lowercase = array[smallest], array[idx]
__lowercase , __lowercase = (
    self.idx_of_element[array[smallest]],
    self.idx_of_element[array[idx]],
)
__lowercase = smallest
else:
break
def _a ( self : int , _lowerCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_parent_idx(_lowerCAmelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
__lowercase , __lowercase = self.heap[idx], self.heap[p]
__lowercase , __lowercase = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
__lowercase = p
__lowercase = self.get_parent_idx(_lowerCAmelCase )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
return self.heap[0]
def _a ( self : str ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.heap[-1], self.heap[0]
__lowercase , __lowercase = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
__lowercase = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
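    # Editor note (sketch): the pop above swaps the root with the last leaf,
    # removes it in O(1), and restores the heap property with sift_down in
    # O(log n); idx_of_element keeps the index map consistent for decrease_key.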
def _a ( self : Union[str, Any] , _lowerCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
self.heap.append(_lowerCAmelCase )
__lowercase = len(self.heap ) - 1
__lowercase = node.val
self.sift_up(len(self.heap ) - 1 )
def _a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.heap ) == 0
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
assert (
    self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less than current value"
__lowercase = new_value
__lowercase = new_value
self.sift_up(self.idx_of_element[node] )
__UpperCamelCase : Tuple = Node("""R""", -1)
__UpperCamelCase : Union[str, Any] = Node("""B""", 6)
__UpperCamelCase : Optional[Any] = Node("""A""", 3)
__UpperCamelCase : Union[str, Any] = Node("""X""", 1)
__UpperCamelCase : Any = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__UpperCamelCase : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3 # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case__ : int = boto3.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = boto3.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
ec2_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , ec2_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
| 38 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase : Any , lowerCamelCase : str=7 , lowerCamelCase : List[str]=3 , lowerCamelCase : int=18 , lowerCamelCase : List[str]=30 , lowerCamelCase : Any=400 , lowerCamelCase : Optional[int]=True , lowerCamelCase : List[Any]=None , lowerCamelCase : Tuple=True , lowerCamelCase : Optional[int]=None , lowerCamelCase : Any=True , ) -> str:
__snake_case : Dict = size if size is not None else {"shortest_edge": 20}
__snake_case : Dict = crop_size if crop_size is not None else {"height": 18, "width": 18}
__snake_case : Optional[Any] = parent
__snake_case : Optional[int] = batch_size
__snake_case : Optional[Any] = num_channels
__snake_case : List[str] = image_size
__snake_case : Optional[int] = min_resolution
__snake_case : Optional[Any] = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : List[Any] = size
__snake_case : Optional[int] = do_center_crop
__snake_case : int = crop_size
__snake_case : List[str] = do_flip_channel_order
def __snake_case ( self : Tuple ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = MobileViTImageProcessor if is_vision_available() else None
def __snake_case ( self : Union[str, Any] ) -> str:
__snake_case : int = MobileViTImageProcessingTester(self )
@property
def __snake_case ( self : Optional[Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : Tuple ) -> Any:
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_flip_channel_order" ) )
def __snake_case ( self : List[Any] ) -> Union[str, Any]:
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
__snake_case : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __snake_case ( self : Optional[Any] ) -> Optional[Any]:
pass
def __snake_case ( self : Union[str, Any] ) -> int:
# Initialize image_processing
__snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__snake_case : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case ( self : Tuple ) -> List[Any]:
# Initialize image_processing
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : Tuple = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case ( self : Dict ) -> int:
# Initialize image_processing
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : Any = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 81 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
snake_case__ : Optional[int] = item.h2.text
snake_case__ : Any = """https://www.amazon.in/""" + item.h2.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
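# Worked example (editor sketch) of the discount formula above: with an MRP of
# ₹1,000 and a current price of ₹800, the discount is
# (1000 - 800) / 1000 * 100 = 20.0 percent.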
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 38 | 0 |
"""simple docstring"""
def a__ ( number_of_steps ):
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f"""number_of_steps needs to be a positive integer, your input {number_of_steps}"""
    if number_of_steps == 1:
        return 1
    current , previous = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
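    # Hedged sanity check (editor addition): the step counts follow the
    # Fibonacci sequence, e.g. 1, 2, 3, 5, 8 ways for 1..5 steps.
    assert [a__(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]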
| 82 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so its average over the sequence length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 38 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
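# The torch-backed classes are registered only when PyTorch is installed;
# otherwise they are simply omitted from the lazy import structure.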
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 83 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
| 38 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 84 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
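# Emit a deprecation warning at import time: the old path keeps working until
# removal in diffusers 0.22.0, but callers should switch to
# diffusers.pipelines.pipeline_utils.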
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 38 | 0 |
from ...processing_utils import ProcessorMixin
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'feature_extractor']
lowercase_ = 'TvltImageProcessor'
lowercase_ = 'TvltFeatureExtractor'
def __init__( self : int , a_ : List[str] , a_ : Dict )-> str:
"""simple docstring"""
super().__init__(image_processor=a_ , feature_extractor=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor
SCREAMING_SNAKE_CASE__ : Any = feature_extractor
def __call__( self : Optional[Any] , a_ : int=None , a_ : str=None , a_ : int=None , a_ : Tuple=None , a_ : Tuple=False , a_ : Dict=False , *a_ : Tuple , **a_ : Union[str, Any] , )-> List[str]:
"""simple docstring"""
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor(a_ , mask_pixel=a_ , *a_ , **a_ )
if images_mixed is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processor(a_ , is_mixed=a_ , *a_ , **a_ )
if audio is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor(
a_ , *a_ , sampling_rate=a_ , mask_audio=a_ , **a_ )
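        # Merge whatever modalities were provided into a single dict of model inputs.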
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 85 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
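        # Reconstruct the exact ordered list of callback events the Trainer should
        # fire for this run, given its logging / evaluation / save cadence.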
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
| 38 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : int=13 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : Dict=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple=99 , UpperCAmelCase : Dict=32 , UpperCAmelCase : Dict=5 , UpperCAmelCase : str=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : List[str]=128 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : Any=16 , UpperCAmelCase : int=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : List[str]=None , ):
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
def __A ( self : List[str] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : int ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
def __A ( self : Any ):
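        # Decoder-style tests reuse the standard inputs and add random encoder
        # hidden states plus an encoder attention mask.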
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = self.prepare_config_and_inputs()
A_ = True
A_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] ):
A_ = NezhaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
A_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , ):
A_ = True
A_ = NezhaModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
A_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] ):
A_ = NezhaForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int ):
A_ = NezhaForNextSentencePrediction(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __A ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ):
A_ = NezhaForPreTraining(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , next_sentence_label=UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __A ( self : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] ):
A_ = NezhaForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = NezhaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ):
A_ = self.num_labels
A_ = NezhaForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : int ):
A_ = self.num_choices
A_ = NezhaForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : List[Any] ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _a ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Optional[int] = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : List[str] = True
def __A ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False ):
A_ = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
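        # Models with a pretraining-style head also expect token-level labels and a
        # sentence-level label, so supply zero tensors of the matching shapes.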
if return_labels:
if model_class in get_values(UpperCAmelCase ):
A_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase )
A_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def __A ( self : Any ):
A_ = NezhaModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : List[str] ):
A_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase )
def __A ( self : str ):
# This regression test was failing with PyTorch < 1.3
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
A_ = None
self.model_tester.create_and_check_model_as_decoder(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , )
def __A ( self : List[str] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase )
def __A ( self : Union[str, Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase )
def __A ( self : Any ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : int ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = NezhaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@slow
@require_torch_gpu
def __A ( self : List[str] ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
A_ = True
A_ = model_class(config=UpperCAmelCase )
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = torch.jit.trace(
UpperCAmelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCAmelCase , os.path.join(UpperCAmelCase , "bert.pt" ) )
A_ = torch.jit.load(os.path.join(UpperCAmelCase , "bert.pt" ) , map_location=UpperCAmelCase )
loaded(inputs_dict["input_ids"].to(UpperCAmelCase ) , inputs_dict["attention_mask"].to(UpperCAmelCase ) )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Union[str, Any] ):
A_ = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
A_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A_ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
A_ = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , UpperCAmelCase )
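        # Reference activations for a 3x3 slice of the output, checked to atol=1e-4.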
A_ = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
@slow
def __A ( self : List[str] ):
A_ = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
A_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A_ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
A_ = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape , UpperCAmelCase )
A_ = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
| 86 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
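        # `size` passed alongside from_dict should override the serialized value.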
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 38 | 0 |
_lowerCamelCase : Optional[int] = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 87 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create random integer labels in [0, num_labels)
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
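# --- Illustrative usage sketch (added; not part of the original test file) ---
# A minimal end-to-end inference helper mirroring the integration test above.
# The "microsoft/cvt-13" checkpoint name and the local image path are
# assumptions for illustration, not values taken from this file.
def _cvt_inference_sketch(image_path="cat.png"):
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFCvtForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
    model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")
    inputs = processor(images=Image.open(image_path), return_tensors="tf")
    logits = model(**inputs).logits  # shape (1, 1000) for ImageNet checkpoints
    predicted_id = int(tf.math.argmax(logits, axis=-1)[0])
    return model.config.id2label[predicted_id]  # human-readable class name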
| 38 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowercase__ :
def __init__( self , SCREAMING_SNAKE_CASE , ) -> Dict:
_lowerCamelCase : int = parent
_lowerCamelCase : Union[str, Any] = 13
_lowerCamelCase : List[str] = 7
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Dict = True
_lowerCamelCase : Any = False
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Tuple = 99
_lowerCamelCase : Optional[Any] = 32
_lowerCamelCase : int = 2
_lowerCamelCase : Dict = 4
_lowerCamelCase : List[Any] = 37
_lowerCamelCase : Optional[int] = """gelu"""
_lowerCamelCase : str = 0.1
_lowerCamelCase : Optional[Any] = 0.1
_lowerCamelCase : int = 512
_lowerCamelCase : Optional[Any] = 16
_lowerCamelCase : Tuple = 2
_lowerCamelCase : str = 0.02
_lowerCamelCase : int = 3
_lowerCamelCase : Optional[int] = 4
_lowerCamelCase : Union[str, Any] = None
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowerCamelCase : List[str] = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Any = None
if self.use_labels:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
_lowerCamelCase : Optional[int] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase : List[Any] = TFDistilBertModel(config=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = [input_ids, input_mask]
_lowerCamelCase : int = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]:
_lowerCamelCase : List[str] = TFDistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : str = TFDistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
_lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : Optional[Any] = self.num_labels
_lowerCamelCase : Any = TFDistilBertForSequenceClassification(SCREAMING_SNAKE_CASE)
_lowerCamelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str:
_lowerCamelCase : Dict = self.num_choices
_lowerCamelCase : int = TFDistilBertForMultipleChoice(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1))
_lowerCamelCase : List[str] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1))
_lowerCamelCase : Tuple = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
_lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : str = self.num_labels
_lowerCamelCase : int = TFDistilBertForTokenClassification(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : int = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) : List[str] = config_and_inputs
_lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
__UpperCAmelCase = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__UpperCAmelCase = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : str = TFDistilBertModelTester(self)
_lowerCamelCase : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , dim=37)
def UpperCamelCase_ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[Any]:
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE)
@slow
def UpperCamelCase_ ( self) -> List[str]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
_lowerCamelCase : Tuple = TFDistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE)
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
@require_tf
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self) -> List[Any]:
_lowerCamelCase : Union[str, Any] = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""")
_lowerCamelCase : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)[0]
_lowerCamelCase : str = [1, 6, 768]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = tf.constant(
[
[
[0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99],
[0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04],
[0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67],
]
])
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4)
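# --- Illustrative usage sketch (added; not part of the original test file) ---
# The integration test above checks raw hidden states; a quicker functional
# check is the fill-mask pipeline with the same public checkpoint. This is a
# sketch under that assumption, not something the test suite asserts.
def _distilbert_fill_mask_sketch():
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilbert-base-uncased", framework="tf")
    # Returns the top-scoring candidate tokens for the [MASK] position.
    return unmasker("Paris is the [MASK] of France.")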
| 88 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# For consistency across the different places where DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only from integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
# We can't have constraints that are complete subsets of one another. That leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2], which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
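# --- Illustrative usage sketch (added; not part of the original test file) ---
# In practice the constraint is handed to `generate()`. The sketch below
# assumes a GPT-2 checkpoint and the words " cat"/" kitten"; those choices
# are illustrative only. Constrained generation requires beam search.
def _disjunctive_generation_sketch():
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    # One token-id list per alternative; fulfilling any one of them
    # satisfies the constraint.
    word_ids = [tokenizer(w, add_special_tokens=False).input_ids for w in (" cat", " kitten")]
    constraint = DisjunctiveConstraint(word_ids)
    inputs = tokenizer("My favourite animal is", return_tensors="pt")
    out = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=10)
    return tokenizer.decode(out[0], skip_special_tokens=True)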
| 38 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCamelCase( _a ):
def __init__( self, *lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
super().__init__(*lowerCamelCase, **lowerCamelCase)
_lowercase : Any = eval_examples
_lowercase : List[Any] = post_process_function
def UpperCamelCase ( self, lowerCamelCase = None, lowerCamelCase=None, lowerCamelCase = None, lowerCamelCase = "eval", **lowerCamelCase, ) -> Dict[str, float]:
"""simple docstring"""
_lowercase : Optional[Any] = gen_kwargs.copy()
_lowercase : List[Any] = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
_lowercase : Any = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
_lowercase : Optional[Any] = gen_kwargs
_lowercase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
_lowercase : Optional[int] = self.get_eval_dataloader(lowerCamelCase)
_lowercase : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation; we will do it in the loop here.
_lowercase : List[Any] = self.compute_metrics
_lowercase : int = None
_lowercase : Optional[Any] = time.time()
_lowercase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowercase : int = eval_loop(
lowerCamelCase, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, )
finally:
_lowercase : Union[str, Any] = compute_metrics
_lowercase : Optional[int] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
_lowercase : Dict = self.post_process_function(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[Any] = self.compute_metrics(lowerCamelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'''{metric_key_prefix}_'''):
_lowercase : Optional[int] = metrics.pop(lowerCamelCase)
metrics.update(output.metrics)
else:
_lowercase : Dict = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(lowerCamelCase)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
_lowercase : List[str] = self.callback_handler.on_evaluate(self.args, self.state, self.control, lowerCamelCase)
return metrics
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase = "test", **lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = gen_kwargs.copy()
_lowercase : str = self.get_test_dataloader(lowerCamelCase)
# Temporarily disable metric computation; we will do it in the loop here.
_lowercase : List[str] = self.compute_metrics
_lowercase : Any = None
_lowercase : str = time.time()
_lowercase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowercase : str = eval_loop(
lowerCamelCase, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, )
finally:
_lowercase : int = compute_metrics
_lowercase : int = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
if self.post_process_function is None or self.compute_metrics is None:
return output
_lowercase : Dict = self.post_process_function(lowerCamelCase, lowerCamelCase, lowerCamelCase, 'predict')
_lowercase : Optional[Any] = self.compute_metrics(lowerCamelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'''{metric_key_prefix}_'''):
_lowercase : Optional[Any] = metrics.pop(lowerCamelCase)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=lowerCamelCase)
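# --- Illustrative usage sketch (added; not part of the original file) ---
# The trainer above is constructed like a regular Seq2SeqTrainer plus the two
# extra hooks. Every name on the right-hand side below (model, args, datasets,
# post-processing and metric functions) is a placeholder the caller supplies,
# and any gen_kwargs passed to evaluate()/predict() are forwarded to generate().
#
# trainer = _lowerCamelCase(                     # the subclass defined above
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,               # raw examples for post-processing
#     post_process_function=post_process,        # (examples, features, outputs[, stage]) -> predictions
#     compute_metrics=compute_metrics,           # EvalPrediction -> Dict[str, float]
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4)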
| 89 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
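# --- Illustrative export sketch (added; not part of the original file) ---
# A minimal sketch of exporting a SegFormer checkpoint with an ONNX config
# like the one above. The "nvidia/mit-b0" checkpoint, the upstream class name
# `SegformerOnnxConfig`, and the legacy `transformers.onnx.export` helper are
# all assumptions for illustration.
def _segformer_onnx_export_sketch(output_path="segformer.onnx"):
    from pathlib import Path

    from transformers import AutoImageProcessor, SegformerModel
    from transformers.models.segformer import SegformerOnnxConfig
    from transformers.onnx import export

    model = SegformerModel.from_pretrained("nvidia/mit-b0")
    processor = AutoImageProcessor.from_pretrained("nvidia/mit-b0")
    onnx_config = SegformerOnnxConfig(model.config)
    # `export` traces the model with dummy inputs from the processor and
    # writes the graph; it returns the matched input and output names.
    onnx_inputs, onnx_outputs = export(processor, model, onnx_config, opset=12, output=Path(output_path))
    return onnx_inputs, onnx_outputs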
| 38 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=0 ) -> Tuple:
lowerCAmelCase__ = np.random.RandomState(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = 3 * [inputs['''prompt''']]
# forward
lowerCAmelCase__ = pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images[0, -3:, -3:, -1]
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = 3 * [inputs.pop('''prompt''' )]
lowerCAmelCase__ = pipe.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors='''np''' , )
lowerCAmelCase__ = text_inputs['''input_ids''']
lowerCAmelCase__ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
lowerCAmelCase__ = prompt_embeds
# forward
lowerCAmelCase__ = pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = 3 * ['''this is a negative prompt''']
lowerCAmelCase__ = negative_prompt
lowerCAmelCase__ = 3 * [inputs['''prompt''']]
# forward
lowerCAmelCase__ = pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images[0, -3:, -3:, -1]
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = 3 * [inputs.pop('''prompt''' )]
lowerCAmelCase__ = []
for p in [prompt, negative_prompt]:
lowerCAmelCase__ = pipe.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors='''np''' , )
lowerCAmelCase__ = text_inputs['''input_ids''']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
lowerCAmelCase__ , lowerCAmelCase__ = embeds
# forward
lowerCAmelCase__ = pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = False
return options
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# using the PNDM scheduler by default
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
lowerCAmelCase__ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''open neural network exchange'''
lowerCAmelCase__ = np.random.RandomState(0 )
lowerCAmelCase__ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase_ , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''open neural network exchange'''
lowerCAmelCase__ = np.random.RandomState(0 )
lowerCAmelCase__ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase_ , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = 0
def test_callback_fn(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase__ = latents[0, -3:, -3:, -1]
lowerCAmelCase__ = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase__ = latents[0, -3:, -3:, -1]
lowerCAmelCase__ = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
lowerCAmelCase__ = False
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''Andromeda galaxy in a bottle'''
lowerCAmelCase__ = np.random.RandomState(0 )
pipe(
prompt=lowerCamelCase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert pipe.safety_checker is None
lowerCAmelCase__ = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase__ = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
| 90 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# Handle the `workflow_call` event, where a job name is the combination of the job names
# in the caller and the callee. For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
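# --- Illustrative invocation (added; not part of the original script) ---
# Assuming the script is saved as `get_ci_error_statistics.py`, a typical run
# looks like the following; the run id comes from the GitHub Actions URL and
# the token only needs `actions:read`. All values are placeholders.
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_errors \
#       --token "$GITHUB_TOKEN"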
| 38 | 0 |
"""simple docstring"""
import argparse
import datetime
def _snake_case ( snake_case__ : str ):
A = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
A = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(snake_case__ ) < 11:
raise ValueError('Must be 10 characters long' )
# Get month
A = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('Month must be between 1 - 12' )
A = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
A = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('Date must be between 1 - 31' )
# Get second separator
A = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
A = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
A = datetime.date(int(snake_case__ ) , int(snake_case__ ) , int(snake_case__ ) )
# Start math
if m <= 2:
A = y - 1
A = m + 12
# maths var
A = int(str(snake_case__ )[:2] )
A = int(str(snake_case__ )[2:] )
A = int(2.6 * m - 5.39 )
A = int(c / 4 )
A = int(k / 4 )
A = int(d + k )
A = int(t + u + v + x )
A = int(z - (2 * c) )
A = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
A = F'Your date {date_input}, is a {days[str(snake_case__ )]}!'
return response
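# --- Worked example (added for illustration) ---
# For "01-31-2010": m = 1 <= 2, so the year becomes 2009 and m becomes 13.
# Then c = 20, k = 9, t = int(2.6 * 13 - 5.39) = 28, u = int(c / 4) = 5,
# v = int(k / 4) = 2, x = d + k = 31 + 9 = 40, z = 28 + 5 + 2 + 40 = 75,
# w = z - 2 * c = 35 and f = 35 % 7 = 0, i.e. days["0"] == "Sunday";
# 31 January 2010 was indeed a Sunday.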
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
    zeller(args.date_input)
| 91 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
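# The manager above mirrors the real datasets DownloadManager interface
# (download_and_extract & friends) but resolves every URL to files packed inside
# a local dummy_data.zip, so dataset loading scripts can be tested offline.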
| 38 | 0 |
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int], max_sum: int, num_index: int, path: list[int],
    result: list[list[int]], remaining_nums_sum: int,
) -> None:
    # Prune when the path overshoots, or when even taking every remaining number
    # could no longer reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
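# For the sample input above this prints the two subsets that sum to 9:
#   [3, 4, 2] [4, 5]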
| 92 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''})
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components(self):
return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_floataa(self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local(self):
self._test_save_load_local()
    def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 38 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
lowerCAmelCase__ :Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
lowerCAmelCase__ :Optional[int] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
lowerCAmelCase__ :List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
lowerCAmelCase__ :Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
lowerCAmelCase__ :str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ :str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
lowerCAmelCase__ :List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
lowerCAmelCase__ :Optional[int] = vocab_file
lowerCAmelCase__ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def snake_case ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.__dict__.copy()
lowerCAmelCase__ :Union[str, Any] = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase__ :str = {}
lowerCAmelCase__ :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.piece_to_id(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = self.sp_model.IdToPiece(__UpperCAmelCase )
return token
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = []
lowerCAmelCase__ :Dict = ''
lowerCAmelCase__ :Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
lowerCAmelCase__ :str = True
lowerCAmelCase__ :Any = []
else:
current_sub_tokens.append(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :str = kwargs.pop('use_source_tokenizer' , __UpperCAmelCase )
lowerCAmelCase__ :Any = self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCAmelCase__ :List[str] = []
lowerCAmelCase__ :List[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
lowerCAmelCase__ :List[Any] = []
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowerCAmelCase__ :Optional[Any] = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(__UpperCAmelCase ) )
else:
lowerCAmelCase__ :int = ''.join(__UpperCAmelCase )
lowerCAmelCase__ :Any = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCAmelCase__ :int = self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ :Dict = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , 'wb' ) as fi:
lowerCAmelCase__ :Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ :Optional[int] = [self.cls_token_id]
lowerCAmelCase__ :Any = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = [self.sep_token_id]
lowerCAmelCase__ :Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
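# The last three methods implement BERT-style formatting: single sequences become
# [CLS] A [SEP], pairs become [CLS] A [SEP] B [SEP], with token_type_ids 0 for the
# first segment and 1 for the second.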
| 93 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    '''simple docstring'''
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default=0):
    '''simple docstring'''
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    '''simple docstring'''

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("""<command> [<args>] """, """""")
        return usage
| 38 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """simple docstring"""
    model_type = '''speech_to_text_2'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_target_positions=1024, **kwargs,
    ) -> Dict:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
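# Hypothetical usage (values mirror the signature defaults above):
#   config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6)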
| 94 |
'''simple docstring'''
from __future__ import annotations
def average(nums: list) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("""List is empty""")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = '''pix2struct_text_model'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1,
        layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new",
        decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1,
        tie_word_embeddings=False, is_decoder=True, **kwargs,
    ) -> List[Any]:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    model_type = '''pix2struct_vision_model'''

    def __init__(
        self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12,
        num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0,
        attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs,
    ) -> str:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    model_type = '''pix2struct'''
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02,
        is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs,
    ) -> Any:
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs) -> int:
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self) -> Tuple:
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
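# The composite config above delegates decoder_start_token_id, pad_token_id and
# eos_token_id to its text sub-config and propagates a single initializer_range
# to both the text and vision sub-configs.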
| 95 |
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    '''simple docstring'''

    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    '''simple docstring'''
    return ".".join(get_sub_domain_name(url).split(""".""")[-2:])


def get_sub_domain_name(url: str) -> str:
    '''simple docstring'''
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    '''simple docstring'''
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(domain)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("""[a-zA-Z0-9]+@""" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
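# Note: only ValueError is caught around the HTTP calls above; network failures
# from `requests` (requests.exceptions.RequestException) will propagate to the caller.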
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features) -> Any:
        if self.audio_column not in features:
            raise ValueError(F'Column {self.audio_column} is not present in features.')
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(F'Column {self.audio_column} is not an Audio type.')
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 96 |
'''simple docstring'''
def is_palindrome(head) -> bool:
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
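# All three variants run in O(n) time: the in-place reversal uses O(1) extra space,
# while the stack- and dict-based checks trade O(n) extra space for simplicity.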
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
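# Import-light module: nothing heavy is loaded here; the names declared in
# _import_structure below are resolved lazily by the _LazyModule assignment
# at the bottom of the file.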
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # no fast tokenizer to import yet; raising here would break imports when tokenizers IS installed
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 97 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| 38 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowercase__ : List[str] = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : int , lowerCAmelCase__ : Path , lowerCAmelCase__ : Union[str, None] = None , lowerCAmelCase__ : Union[List[str], None] = None , lowerCAmelCase__ : Union[str, List[str], None] = None , lowerCAmelCase__ : bool = True , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [file for file in os.listdir(lowerCAmelCase__ ) if os.path.isfile(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) )]
if identifier is not None:
_UpperCamelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
for n_ in n_identifier:
_UpperCamelCase = [file for file in files if n_ not in file]
else:
_UpperCamelCase = [file for file in files if n_identifier not in file]
_UpperCamelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
_UpperCamelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , lowerCAmelCase__ )
if only_modules:
_UpperCamelCase = file.split('''.''' )[0]
try:
_UpperCamelCase = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = doctest.DocTestSuite(lowerCAmelCase__ )
_UpperCamelCase = unittest.TextTestRunner().run(lowerCAmelCase__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
_UpperCamelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def snake_case__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = Path('''src/transformers''' )
_UpperCamelCase = '''modeling'''
_UpperCamelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ , ignore_files=lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = Path('''src/transformers''' )
_UpperCamelCase = '''tokenization'''
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ )
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = Path('''src/transformers''' )
_UpperCamelCase = '''configuration'''
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> int:
'''simple docstring'''
_UpperCamelCase = Path('''src/transformers''' )
_UpperCamelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(lowerCAmelCase__ , n_identifier=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> Any:
'''simple docstring'''
_UpperCamelCase = Path('''docs/source''' )
_UpperCamelCase = ['''favicon.ico''']
self.analyze_directory(lowerCAmelCase__ , ignore_files=lowerCAmelCase__ , only_modules=lowerCAmelCase__ )
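# A self-contained sketch of the doctest pattern used above: collect a module's
# docstring examples into a suite and run them with unittest. The module argument is
# whatever you want to check; nothing here is specific to transformers.
import doctest
import unittest

def run_module_doctests(module) -> unittest.TestResult:
    suite = doctest.DocTestSuite(module)  # one test per docstring example group
    return unittest.TextTestRunner().run(suite)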
| 98 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
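# For intuition, a simplified sketch of what the out_features/out_indices alignment
# above amounts to (the real helper also validates its inputs; this is not its code):
def align_out_features_sketch(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default: expose only the last stage
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices

assert align_out_features_sketch(None, None, ["stem", "stage1"]) == (["stage1"], [1])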
| 38 | 0 |
from collections import deque
def a (lowerCAmelCase__ ):
__a = len(lowerCAmelCase__ )
__a = deque()
__a = [False for _ in range(lowerCAmelCase__ )]
__a = [-1 for _ in range(lowerCAmelCase__ )]
__a = index_of[:]
def strong_connect(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
        __a = index # discovery index: the order in which this node is first seen
        __a = index # low-link: smallest discovery index reachable from this node's subtree
index += 1
stack.append(lowerCAmelCase__ )
__a = True
for w in g[v]:
if index_of[w] == -1:
__a = strong_connect(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__a = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__a = []
__a = stack.pop()
__a = False
component.append(lowerCAmelCase__ )
while w != v:
__a = stack.pop()
__a = False
component.append(lowerCAmelCase__ )
components.append(lowerCAmelCase__ )
return index
__a = []
for v in range(lowerCAmelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCAmelCase__ , 0 , lowerCAmelCase__ )
return components
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
__a = [[] for _ in range(lowerCAmelCase__ )]
for u, v in edges:
g[u].append(lowerCAmelCase__ )
return g
if __name__ == "__main__":
# Test
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = [0, 0, 1, 2, 3, 3, 4, 4, 6]
SCREAMING_SNAKE_CASE = [1, 3, 2, 0, 1, 4, 5, 6, 5]
SCREAMING_SNAKE_CASE = [(u, v) for u, v in zip(source, target)]
SCREAMING_SNAKE_CASE = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
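# Usage sketch: `tarjan` emits components in reverse topological order of the SCC
# DAG; they can be folded into a vertex -> component-id lookup like this.
def component_ids(components, n_vertices):
    ids = [0] * n_vertices
    for comp_id, component in enumerate(components):
        for vertex in component:
            ids[vertex] = comp_id
    return ids

assert component_ids([[5], [6], [4], [3, 2, 1, 0]], 7) == [3, 3, 3, 3, 2, 0, 1]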
| 99 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : int = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = dct.pop(__magic_name__ )
snake_case__ : Dict = val
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=__magic_name__ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=__magic_name__ , image_size=3_84 , num_labels=10_00 )
snake_case__ : Union[str, Any] = False
# load original model from timm
snake_case__ : List[Any] = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
snake_case__ : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : str = """huggingface/label-files"""
snake_case__ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(__magic_name__ ).eval()
else:
snake_case__ : Union[str, Any] = ViTHybridForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# create image processor
snake_case__ : Optional[Any] = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
snake_case__ : Union[str, Any] = transform.transforms
snake_case__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : Any = ViTHybridImageProcessor(
do_resize=__magic_name__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : int = transform(__magic_name__ ).unsqueeze(0 )
snake_case__ : List[str] = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ )
snake_case__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Dict = timm_model.forward_features(__magic_name__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__magic_name__ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : int = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
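# The rename machinery above reduces to popping old keys and reinserting their values
# under new names; a tiny standalone illustration of the pattern:
state = {"blocks.0.norm1.weight": 1.0}
for src, dest in [("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")]:
    state[dest] = state.pop(src)
assert list(state) == ["vit.encoder.layer.0.layernorm_before.weight"]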
| 38 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A : Dict = logging.get_logger(__name__)
def __snake_case ( lowerCAmelCase_ ) -> YolosConfig:
SCREAMING_SNAKE_CASE__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
SCREAMING_SNAKE_CASE__ = 1_9_2
SCREAMING_SNAKE_CASE__ = 7_6_8
SCREAMING_SNAKE_CASE__ = 1_2
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = [8_0_0, 1_3_3_3]
SCREAMING_SNAKE_CASE__ = False
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE__ = 3_3_0
SCREAMING_SNAKE_CASE__ = 1_4
SCREAMING_SNAKE_CASE__ = 6
SCREAMING_SNAKE_CASE__ = 1_3_2_0
elif "yolos_s" in yolos_name:
SCREAMING_SNAKE_CASE__ = 3_8_4
SCREAMING_SNAKE_CASE__ = 1_5_3_6
SCREAMING_SNAKE_CASE__ = 1_2
SCREAMING_SNAKE_CASE__ = 6
elif "yolos_b" in yolos_name:
SCREAMING_SNAKE_CASE__ = [8_0_0, 1_3_4_4]
SCREAMING_SNAKE_CASE__ = 9_1
SCREAMING_SNAKE_CASE__ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ = '''coco-detection-id2label.json'''
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> Optional[int]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[: config.hidden_size, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ = in_proj_weight[-config.hidden_size :, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-config.hidden_size :]
def __snake_case ( lowerCAmelCase_ ) -> str:
if "backbone" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> dict:
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(lowerCAmelCase_ )
if "qkv" in key:
SCREAMING_SNAKE_CASE__ = key.split('''.''' )
SCREAMING_SNAKE_CASE__ = int(key_split[2] )
SCREAMING_SNAKE_CASE__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ = val[:dim, :]
SCREAMING_SNAKE_CASE__ = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ = val[:dim]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ = val[-dim:]
else:
SCREAMING_SNAKE_CASE__ = val
return orig_state_dict
def __snake_case ( ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> List[str]:
SCREAMING_SNAKE_CASE__ = get_yolos_config(lowerCAmelCase_ )
# load original state_dict
SCREAMING_SNAKE_CASE__ = torch.load(lowerCAmelCase_ , map_location='''cpu''' )['''model''']
# load 🤗 model
SCREAMING_SNAKE_CASE__ = YolosForObjectDetection(lowerCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by YolosImageProcessor
SCREAMING_SNAKE_CASE__ = 8_0_0 if yolos_name != '''yolos_ti''' else 5_1_2
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_detection''' , size=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs.logits, outputs.pred_boxes
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None, None
if yolos_name == "yolos_ti":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
SCREAMING_SNAKE_CASE__ = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
SCREAMING_SNAKE_CASE__ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCAmelCase_ , organization='''hustvl''' )
model.push_to_hub(lowerCAmelCase_ , organization='''hustvl''' )
if __name__ == "__main__":
_A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_A : Union[str, Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
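# The q/k/v handling above follows the usual fused-projection split; a standalone
# sketch with made-up sizes, not tied to any real checkpoint:
import torch

hidden_size = 8
fused_qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # rows stacked as [q; k; v]
q_w = fused_qkv_weight[:hidden_size, :]
k_w = fused_qkv_weight[hidden_size : 2 * hidden_size, :]
v_w = fused_qkv_weight[2 * hidden_size :, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)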
| 100 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Convad(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Convad(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
# make sure sample is on the same device as the parameters and has same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
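# A self-contained sketch (made-up sizes) of the nearest-codebook lookup performed by
# the vector quantizer above: flatten the latents, take the argmin pairwise distance,
# re-embed, then apply the straight-through estimator so gradients reach the encoder.
import torch

codebook = torch.nn.Embedding(16, 4)  # n_e codes of dimension vq_embed_dim
z_flat = torch.randn(10, 4, requires_grad=True)  # flattened latents
indices = torch.argmin(torch.cdist(z_flat, codebook.weight), dim=1)
z_q_sketch = codebook(indices)  # (10, 4) nearest codebook vectors
z_q_sketch = z_flat + (z_q_sketch - z_flat).detach()  # straight-through estimator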
| 38 | 0 |
def a__ ( A__ ):
if not isinstance(A__, A__ ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : Dict = str(A__ )
while len(A__ ) != 1:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [int(A__ ) for i in num_string]
SCREAMING_SNAKE_CASE_ : List[str] = 1
for i in range(0, len(A__ ) ):
total *= numbers[i]
SCREAMING_SNAKE_CASE_ : int = str(A__ )
steps += 1
return steps
def a__ ( A__ ):
if not isinstance(A__, A__ ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[Any] = str(A__ )
while len(A__ ) != 1:
SCREAMING_SNAKE_CASE_ : List[str] = [int(A__ ) for i in num_string]
SCREAMING_SNAKE_CASE_ : str = 0
for i in range(0, len(A__ ) ):
total += numbers[i]
SCREAMING_SNAKE_CASE_ : int = str(A__ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
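# Quick sanity check (worked by hand): 39 -> 27 -> 14 -> 4 takes 3 multiplicative
# steps, and 199 -> 19 -> 10 -> 1 takes 3 additive steps.
assert multiplicative_persistence(39) == 3
assert additive_persistence(199) == 3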
| 101 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
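# A common follow-up to the check above (sketch only, with a hypothetical target
# size): upsample the predicted depth map back to the original image resolution.
import torch.nn.functional as F

depth_sketch = torch.rand(1, 3_8_4, 3_8_4)  # (batch, H, W), as in the test above
resized = F.interpolate(
    depth_sketch.unsqueeze(1),  # interpolate expects (batch, channels, H, W)
    size=(4_8_0, 6_4_0),  # hypothetical original image size
    mode="bicubic",
    align_corners=False,
).squeeze(1)
assert resized.shape == (1, 4_8_0, 6_4_0)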
| 38 | 0 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    # Standard BFS: return True if an augmenting path from source s to sink t still exists.
UpperCamelCase : List[str] = [False] * len(SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = []
queue.append(SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = True
while queue:
UpperCamelCase : Tuple = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = True
UpperCamelCase : Tuple = u
return visited[t]
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    # parent[] is filled by the BFS and used to reconstruct the augmenting path
UpperCamelCase : int = [-1] * (len(SCREAMING_SNAKE_CASE ))
UpperCamelCase : Any = 0
while bfs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = float("""Inf""" )
UpperCamelCase : str = sink
while s != source:
# Find the minimum value in select path
UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , graph[parent[s]][s] )
UpperCamelCase : List[Any] = parent[s]
max_flow += path_flow
UpperCamelCase : List[str] = sink
while v != source:
UpperCamelCase : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCamelCase : Tuple = parent[v]
return max_flow
__magic_name__ : Dict = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__magic_name__ , __magic_name__ : Union[str, Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
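# Sanity check: this classic 6-node network has maximum flow 23 (one minimum cut is
# {0, 1, 2, 4} vs {3, 5}, with capacity 12 + 7 + 4 = 23). ford_fulkerson mutates its
# graph argument into the residual network, so the check rebuilds the capacities:
fresh_graph = [
    [0, 1_6, 1_3, 0, 0, 0],
    [0, 0, 1_0, 1_2, 0, 0],
    [0, 4, 0, 0, 1_4, 0],
    [0, 0, 9, 0, 0, 2_0],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(fresh_graph, source, sink) == 2_3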
| 102 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name: str) -> None:
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name: str) -> str:
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input() -> SageMakerConfig:
    """Interactively ask the user for a SageMaker configuration and return it."""
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want to use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 38 | 0 |
"""simple docstring"""
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 103 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Scrape Amazon search results for ``product`` and collect the details of
    each listing in a Pandas DataFrame.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": (
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
        ),
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out cells that could not be scraped. The exact column
        # conditions were lost in extraction, so treat these two lines as a
        # best-effort reconstruction.
        data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
        data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.index += 1
return data_frame
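

# --- Hedged sketch (illustrative addition) ---
# The discount computed above is (MRP - price) / MRP * 100. Pulled out as a
# helper it becomes unit-testable; the rupee/comma stripping mirrors the
# inline code.
def percent_discount(mrp: str, price: str) -> float:
    mrp_value = float(mrp.strip("₹").replace(",", ""))
    price_value = float(price.strip("₹").replace(",", ""))
    return (mrp_value - price_value) / mrp_value * 100


assert round(percent_discount("₹1,000", "₹750"), 2) == 25.0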
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 38 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size(self):
return len(self.fairseq_ids_to_tokens )
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
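

# --- Hedged usage sketch (illustrative addition) ---
# Loading the published checkpoint pulls the SentencePiece model plus the
# reduced dict.txt referenced in the URL maps above. Left commented out
# because it needs network access:
#
#     tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#
# Any syllable missing from dict.txt falls back to the unknown token, per the
# fairseq_tokens_to_ids lookup in _convert_token_to_id above.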
| 104 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
            # Rust correctly handles the space before the mask while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
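

# --- Hedged sketch (illustrative addition) ---
# The "Ġ" in the expected tokens above is byte-level BPE's marker for a
# leading space: GPT-2-style tokenizers remap raw bytes to printable
# characters before BPE runs, and byte 0x20 (" ") is one of the shifted
# bytes, landing on chr(0x20 + 256).
assert chr(0x20 + 256) == "Ġ"  # why " Allen" tokenizes as "ĠAllen"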
| 38 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` (given as a string) by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F"""{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 105 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-3
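

# --- Hedged usage sketch (illustrative addition) ---
# get_aligned_output_features_output_indices() reconciles out_features and
# out_indices against stage_names, so backbone consumers can pick stages by
# name. Sketch, kept as comments because this module lives inside the library
# and should not run example code at import time:
#
#     config = ResNetConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])
#     config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']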
| 38 | 0 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    Knuth-Morris-Pratt string search: return True if ``pattern`` occurs in ``text``.
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Compute the KMP failure array: failure[j] is the length of the longest
    proper prefix of pattern[: j + 1] that is also a suffix of it.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
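

# --- Hedged sketch (illustrative addition) ---
# kmp() answers the same membership question as Python's `in` operator, which
# gives a cheap cross-check:
for _p, _t in [("ABABX", "ABABZABABYABABX"), ("AAAB", "ABAAAAAB"), ("xyz", "abc")]:
    assert kmp(_p, _t) == (_p in _t)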
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 106 |
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
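
# --- Hedged note (illustrative addition) ---
# The deprecate() call above fires once at import time, so it can be observed
# with the standard warnings machinery. Sketch, assuming the helper emits a
# FutureWarning (its usual behavior in diffusers):
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         import diffusers.pipeline_utils  # noqa: F401
#     assert any(issubclass(w.category, FutureWarning) for w in caught)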
| 38 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('Does not support attention outputs' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
pass
@unittest.skip('Esm does not support embedding resizing' )
def __UpperCAmelCase ( self : str ) -> Any:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : Dict ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip('ESMFold only has one output format.' )
def __UpperCAmelCase ( self : Tuple ) -> Any:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def __UpperCAmelCase ( self : int ) -> int:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __UpperCAmelCase ( self : str ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __UpperCAmelCase ( self : str ) -> Tuple:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
"""simple docstring"""
@slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
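

# --- Hedged note (illustrative addition) ---
# The integration test feeds pre-tokenized ESM token ids directly. With the
# real checkpoint one would normally tokenize a protein sequence first;
# sketch, assuming network access (hence commented out):
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
#     inputs = tok(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"], return_tensors="pt", add_special_tokens=False)
#     positions = model(**inputs)["positions"]  # (num_recycles, batch, seq_len, 14, 3)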
| 107 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
'''simple docstring'''
    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
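

# --- Hedged sketch (illustrative addition) ---
# The recorder above shows the callback pattern this file tests: subclass
# TrainerCallback and override only the hooks you need. A minimal example
# (hypothetical helper, not part of the test suite) that stops training after
# a fixed number of optimizer steps:
class StopAfterNSteps(TrainerCallback):
    def __init__(self, max_steps=10):
        self.max_steps = max_steps

    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step >= self.max_steps:
            control.should_training_stop = True
        return control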
| 38 | 0 |
from __future__ import annotations


class BoyerMooreSearch:
    """Bad-character-heuristic part of the Boyer-Moore string search."""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern is
        aligned at `current_pos`, or -1 if the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # Note: reassigning the loop variable of a `range` has no effect on
                # the iteration, so this "shift" is cosmetic and every alignment is
                # still examined (effectively a naive scan).
                i = mismatch_index - match_index  # shifting index
        return positions
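
# Worked example for the heuristic above: with text = "ABAABA" and pattern = "AB",
# alignments 0..4 are examined and the pattern matches at alignments 0 and 3
# ("AB" occupies indices 0-1 and 3-4), so bad_character_heuristic() returns [0, 3].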
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
 | 108 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
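
# Illustrative usage sketch (not part of the original test file; the kwargs mirror
# the defaults exercised above, the demo image is arbitrary):
if __name__ == "__main__":
    from transformers import DPTImageProcessor

    processor = DPTImageProcessor(size={"height": 18, "width": 18})
    demo_image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
    pixel_values = processor(demo_image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])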
| 38 | 0 |
"""
Project Euler problem 188: https://projecteuler.net/problem=188

Find the last eight digits of the hyperexponentiation (tetration) of 1777 by 1855.
"""


def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursively compute (base ** exponent) % modulo_value by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of base↑↑height, i.e. of the power tower
    base ** base ** ... ** base with `height` occurrences of base."""
    # the tower is built bottom-up, keeping only the last `digits` digits of each
    # intermediate result via modular exponentiation
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
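
# Small sanity checks (illustrative): solution(base=3, height=2, digits=8) == 27,
# since 3↑↑2 = 3**3 = 27; and _modexpt(2, 10, 1000) == 24, since 2**10 = 1024.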
if __name__ == "__main__":
print(F'''{solution() = }''')
| 109 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
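
# Worked example of the conv-embedding size formula used in create_and_check_model:
# with image_size=64 and stage 0 settings (patch_size=7, padding=2, stride=4),
# floor((64 + 2 * 2 - 7) / 4) + 1 = 16, so a 64x64 input becomes a 16x16 feature map;
# stages 1 and 2 (patch_size=3, padding=1, stride=2) reduce it to 8x8 and then 4x4,
# which matches the expected (batch_size, embed_dim[-1], 4, 4) last_hidden_state.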
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 38 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
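
# Worked example for `down_ops` above: with the default key_dim=[16, 16, 16] and
# hidden_sizes=[128, 256, 384], the two subsample stages evaluate to
# ["Subsample", 16, 128 // 16 = 8, 4, 2, 2] and ["Subsample", 16, 256 // 16 = 16, 4, 2, 2].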
| 110 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
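
# Illustrative end-to-end use (assumed, mirroring the public generate() API for
# constrained beam search; tokenizer/model/input_ids are placeholders):
#   word_ids = tokenizer(["rains", "raining"], add_special_tokens=False).input_ids
#   constraint = DisjunctiveConstraint(word_ids)
#   model.generate(input_ids, constraints=[constraint], num_beams=4)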
| 38 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
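
# Example invocation (illustrative only: `--data_dir`, `--model_name_or_path`,
# `--output_dir` and `--do_predict` are assumed to come from `add_generic_args` /
# the base transformer args in lightning_base, and the paths are placeholders):
#   python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#       --output_dir ./ner-out --task_type NER --max_seq_length 128 --do_train --do_predict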
| 410 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
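
# Illustrative usage of the ONNX config above (assumed, not from the original file):
#   config = SegformerConfig()
#   onnx_config = SegformerOnnxConfig(config)
#   onnx_config.inputs   # OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])
#   onnx_config.atol_for_validation  # 1e-4, tolerance used when validating the exported model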
| 38 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    r"""
    Wraps a BLIP image processor and a tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
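
# Illustrative usage (assumed checkpoint name; any BLIP-2 checkpoint on the Hub works):
#   from PIL import Image
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=Image.open("cat.png"), text="Question: what is shown? Answer:", return_tensors="pt")
#   list(inputs.keys())  # e.g. ["pixel_values", "input_ids", "attention_mask"]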
| 197 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
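
# The mapping returned above has the form {job_name: html_url}, e.g. (illustrative):
# {"Model tests (models/albert, single-gpu)":
#  "https://github.com/huggingface/transformers/actions/runs/<run_id>/jobs/<job_id>"}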
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact and save it as `{artifact_name}.zip`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests it made fail."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path like `tests/models/albert/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test


def reduce_by_model(logs, error_filter=None):
    """Group errors by model and count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    """Render the per-error summary as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model summary as a GitHub-flavored markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 38 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """
    Configuration for training model.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
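
# Illustrative usage (the codeparrot scripts parse these dataclasses with
# HfArgumentParser; the exact call in the training script is assumed here):
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args()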
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
SCREAMING_SNAKE_CASE : int = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
SCREAMING_SNAKE_CASE : Union[str, Any] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
SCREAMING_SNAKE_CASE : Optional[Any] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
SCREAMING_SNAKE_CASE : Optional[Any] = field(default=10_24 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
SCREAMING_SNAKE_CASE : List[str] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
SCREAMING_SNAKE_CASE : int = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
SCREAMING_SNAKE_CASE : Dict = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
SCREAMING_SNAKE_CASE : Tuple = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
SCREAMING_SNAKE_CASE : Optional[int] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
SCREAMING_SNAKE_CASE : str = field(default=2_56 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
SCREAMING_SNAKE_CASE : List[str] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
SCREAMING_SNAKE_CASE : Tuple = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
SCREAMING_SNAKE_CASE : Optional[Any] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
SCREAMING_SNAKE_CASE : Optional[Any] = field(
default=2_00 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
SCREAMING_SNAKE_CASE : Union[str, Any] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
SCREAMING_SNAKE_CASE : Any = field(
default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} )
SCREAMING_SNAKE_CASE : Optional[Any] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
SCREAMING_SNAKE_CASE : List[str] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
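# Illustrative helper (added for clarity, not part of the original file): the
# device convention documented in the field above, where -1 selects the CPU and
# any other value is a CUDA device index. Name and return type are assumptions.
def _resolve_pipeline_device(device_id: int) -> str:
    return "cpu" if device_id == -1 else f"cuda:{device_id}"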
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
SCREAMING_SNAKE_CASE : Union[str, Any] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
SCREAMING_SNAKE_CASE : Dict = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
SCREAMING_SNAKE_CASE : Any = field(
        default=100_000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
SCREAMING_SNAKE_CASE : List[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
SCREAMING_SNAKE_CASE : Union[str, Any] = field(
        default=1_000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
SCREAMING_SNAKE_CASE : int = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
SCREAMING_SNAKE_CASE : str = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
SCREAMING_SNAKE_CASE : Optional[Any] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
SCREAMING_SNAKE_CASE : Tuple = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
SCREAMING_SNAKE_CASE : str = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
SCREAMING_SNAKE_CASE : Dict = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
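# Minimal sketch (an illustration added here, not from this file) of the Jaccard
# similarity behind the 0.85 near-deduplication threshold above: two token sets
# whose score reaches the threshold count as near-duplicates.
def _jaccard_similarity(tokens_a: set, tokens_b: set) -> float:
    union = tokens_a | tokens_b
    return len(tokens_a & tokens_b) / len(union) if union else 1.0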
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
SCREAMING_SNAKE_CASE : str = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
SCREAMING_SNAKE_CASE : Union[str, Any] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
    SCREAMING_SNAKE_CASE : List[Any] = field(default=200_000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
SCREAMING_SNAKE_CASE : Optional[Any] = field(
        default=32_768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
SCREAMING_SNAKE_CASE : str = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
SCREAMING_SNAKE_CASE : Union[str, Any] = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
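# Usage sketch for the tokenizer-training arguments above, assuming the upstream
# transformers fast-tokenizer API (train_new_from_iterator); text_iterator is a
# hypothetical placeholder.
# from transformers import AutoTokenizer
# base_tokenizer = AutoTokenizer.from_pretrained("gpt2")
# new_tokenizer = base_tokenizer.train_new_from_iterator(text_iterator, vocab_size=32_768)
# new_tokenizer.save_pretrained("codeparrot")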
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
SCREAMING_SNAKE_CASE : List[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
SCREAMING_SNAKE_CASE : Union[str, Any] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
    SCREAMING_SNAKE_CASE : List[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Number of workers used for pretokenization.'''} )
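# Sketch of the pretokenization step these arguments configure, assuming the
# datasets library API; tokenizer and num_workers are placeholders.
# from datasets import load_dataset
# ds = load_dataset("codeparrot/codeparrot-clean-train", split="train")
# ds = ds.map(lambda batch: tokenizer(batch["content"]), batched=True, num_proc=num_workers)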
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
SCREAMING_SNAKE_CASE : int = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
SCREAMING_SNAKE_CASE : Optional[int] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
    SCREAMING_SNAKE_CASE : Dict = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Push the saved model to the hub.'''} )
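# Sketch of the model initialization these arguments drive, assuming the
# upstream transformers API; tokenizer is a placeholder.
# from transformers import AutoConfig, AutoModelForCausalLM
# config = AutoConfig.from_pretrained("gpt2-large", vocab_size=len(tokenizer))
# model = AutoModelForCausalLM.from_config(config)
# model.save_pretrained("codeparrot")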
| 699 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
    def __init__( self , dataset_name , config , version , cache_dir = None , use_local_dummy_data = False , load_existing_dummy_data = True , download_callbacks = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
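# Standalone illustration of the dummy-file naming rule used repeatedly above:
# the last path segment of a URL is percent-encoded so query arguments stay
# filesystem-safe.
# >>> urllib.parse.quote_plus("train.json?dl=1")
# 'train.json%3Fdl%3D1'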
| 38 | 0 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
'''simple docstring'''
def __init__( self : int ):
'''simple docstring'''
        self.events = []
def _snake_case ( self : str , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , **lowerCamelCase : Dict ):
'''simple docstring'''
self.events.append("on_init_end" )
def _snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , **lowerCamelCase : List[str] ):
'''simple docstring'''
self.events.append("on_train_begin" )
def _snake_case ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , **lowerCamelCase : List[str] ):
'''simple docstring'''
self.events.append("on_train_end" )
def _snake_case ( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : str , **lowerCamelCase : str ):
'''simple docstring'''
self.events.append("on_epoch_begin" )
def _snake_case ( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , **lowerCamelCase : Any ):
'''simple docstring'''
self.events.append("on_epoch_end" )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Dict , **lowerCamelCase : Dict ):
'''simple docstring'''
self.events.append("on_step_begin" )
def _snake_case ( self : Tuple , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
self.events.append("on_step_end" )
def _snake_case ( self : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Optional[Any] , **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
self.events.append("on_evaluate" )
def _snake_case ( self : str , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , **lowerCamelCase : int ):
'''simple docstring'''
self.events.append("on_predict" )
def _snake_case ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : str , **lowerCamelCase : str ):
'''simple docstring'''
self.events.append("on_save" )
def _snake_case ( self : Tuple , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ):
'''simple docstring'''
self.events.append("on_log" )
def _snake_case ( self : Dict , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[Any] ):
'''simple docstring'''
self.events.append("on_prediction_step" )
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
        self.output_dir = tempfile.mkdtemp()
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.output_dir )
def _snake_case ( self : Optional[int] , lowerCamelCase : Union[str, Any]=0 , lowerCamelCase : Dict=0 , lowerCamelCase : Optional[Any]=64 , lowerCamelCase : Optional[Any]=64 , lowerCamelCase : Optional[int]=None , lowerCamelCase : Optional[int]=False , **lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
__lowercase = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
__lowercase = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
__lowercase = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
__lowercase = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self : Optional[int] , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
__lowercase = sorted(__SCREAMING_SNAKE_CASE , key=lambda lowerCamelCase : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
__lowercase = sorted(__SCREAMING_SNAKE_CASE , key=lambda lowerCamelCase : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self : List[str] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = ["""on_init_end""", """on_train_begin"""]
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def _snake_case ( self : str ):
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=__SCREAMING_SNAKE_CASE )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
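# Standalone sketch of the step cadence verified by the tests above: logging and
# saving events fire whenever the step counter hits the corresponding interval.
# Names are illustrative, not from transformers.
def _events_at_step(step: int, logging_steps: int, save_steps: int) -> list:
    events = ["on_step_begin", "on_step_end"]
    if step % logging_steps == 0:
        events.append("on_log")
    if step % save_steps == 0:
        events.append("on_save")
    return events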
| 402 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
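# Standalone sketch of the device-aware seeding convention used in the
# dummy-inputs helper above: MPS relies on the global torch seed, while other
# devices get a device-local Generator. The helper name is illustrative.
import torch
def _make_generator(device: str, seed: int = 0) -> torch.Generator:
    if device.startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)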
| 38 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = """glpn"""
def __init__( self : List[Any] , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : Optional[int]=[2, 2, 2, 2] , lowerCAmelCase_ : int=[8, 4, 2, 1] , lowerCAmelCase_ : int=[32, 64, 160, 256] , lowerCAmelCase_ : int=[7, 3, 3, 3] , lowerCAmelCase_ : str=[4, 2, 2, 2] , lowerCAmelCase_ : int=[1, 2, 5, 8] , lowerCAmelCase_ : int=[4, 4, 4, 4] , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Dict=1e-6 , lowerCAmelCase_ : List[str]=64 , lowerCAmelCase_ : List[str]=10 , lowerCAmelCase_ : Dict=-1 , **lowerCAmelCase_ : Union[str, Any] , ) -> Dict:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index
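# Usage sketch, assuming this class mirrors transformers.GLPNConfig (the
# upstream name; not defined in this file):
# from transformers import GLPNConfig, GLPNForDepthEstimation
# config = GLPNConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
# model = GLPNForDepthEstimation(config)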
| 393 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field( input_text : str , convert_value=None , default=None , error_message=None ):
    '''simple docstring'''
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options( input_text : str , options=[] , convert_value=None , default_choice : int = 0 ):
    '''simple docstring'''
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment( value : str ) -> ComputeEnvironment:
    '''simple docstring'''
    value = int(value )
    return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def _convert_distributed_mode( value : str ) -> DistributedType:
    '''simple docstring'''
    value = int(value )
    return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def _convert_dynamo_backend( value : str ) -> DynamoBackend:
    '''simple docstring'''
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision( value : str ) -> PrecisionType:
    '''simple docstring'''
    value = int(value )
    return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def _convert_sagemaker_distributed_mode( value : str ) -> SageMakerDistributedType:
    '''simple docstring'''
    value = int(value )
    return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def _convert_yes_no_to_bool( value : str ) -> bool:
    '''simple docstring'''
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter( argparse.RawDescriptionHelpFormatter ):
    '''simple docstring'''
    def _format_usage( self , usage , actions , groups , prefix ):
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("""<command> [<args>] """ , """""" )
        return usage
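# Example of the index-to-enum pattern above: a BulletMenu returns the chosen
# row index as a string and the converter maps it onto the enum, e.g.
# _convert_mixed_precision("1") -> PrecisionType.FP16.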
| 38 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__snake_case = """swinv2"""
__snake_case = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , _lowercase=224 , _lowercase=4 , _lowercase=3 , _lowercase=96 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 12, 24] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=32 , **_lowercase , ) -> Optional[int]:
super().__init__(**__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = image_size
_lowerCamelCase : Any = patch_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : Tuple = embed_dim
_lowerCamelCase : Optional[int] = depths
_lowerCamelCase : List[Any] = len(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = num_heads
_lowerCamelCase : int = window_size
_lowerCamelCase : List[Any] = mlp_ratio
_lowerCamelCase : Dict = qkv_bias
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Any = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Optional[int] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : Any = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
_lowerCamelCase : Tuple = (0, 0, 0, 0)
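# Standalone sketch of the channel rule noted in the comment above: stage widths
# double, so the final stage has embed_dim * 2 ** (num_stages - 1) channels.
# The function name is illustrative.
def _final_hidden_size(embed_dim: int, depths: list) -> int:
    return int(embed_dim * 2 ** (len(depths) - 1))
assert _final_hidden_size(96, [2, 2, 6, 2]) == 768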
| 434 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( nums : list ) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
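    # e.g. UpperCamelCase__([3, 6, 9]) returns 6.0; an empty list raises ValueError.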
| 38 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.02 , use_labels=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
def lowerCamelCase__( self :Tuple ) -> List[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        config = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase__( self :Union[str, Any] ) -> str:
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,)
def lowerCamelCase__( self :int ) -> Optional[int]:
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase__( self :int ,__snake_case :Optional[int] ,__snake_case :Optional[Any] ,__snake_case :List[Any] ,__snake_case :List[Any] ,**__snake_case :Tuple ,) -> Dict:
a__ = BertGenerationEncoder(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :Tuple ,__snake_case :Any ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :List[Any] ,__snake_case :Any ,__snake_case :Union[str, Any] ,**__snake_case :Dict ,) -> Tuple:
a__ = True
a__ = BertGenerationEncoder(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,)
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :Any ,__snake_case :Dict ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :List[str] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,**__snake_case :Dict ,) -> int:
a__ = True
a__ = True
a__ = BertGenerationDecoder(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval()
# first forward pass
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,use_cache=__SCREAMING_SNAKE_CASE ,)
a__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
a__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
a__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
a__ = torch.cat([input_mask, next_mask] ,dim=-1 )
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,output_hidden_states=__SCREAMING_SNAKE_CASE ,)["""hidden_states"""][0]
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,past_key_values=__SCREAMING_SNAKE_CASE ,output_hidden_states=__SCREAMING_SNAKE_CASE ,)["""hidden_states"""][0]
# select random slice
a__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
a__ = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-3 ) )
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Dict ,__snake_case :Dict ,*__snake_case :str ,) -> List[Any]:
a__ = BertGenerationDecoder(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :int ) -> Dict:
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class snake_case_ (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def lowerCamelCase__( self :str ) -> Optional[int]:
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self ,config_class=BertGenerationConfig ,hidden_size=37 )
def lowerCamelCase__( self :Any ) -> str:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = """bert"""
        self.model_tester.create_and_check_model(config ,input_ids ,input_mask ,token_labels )
def lowerCamelCase__( self :Optional[int] ) -> str:
a__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,)
def lowerCamelCase__( self :int ) -> Tuple:
a__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__( self :Tuple ) -> List[str]:
a__ = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
a__ = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
a__ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
a__ = model(__SCREAMING_SNAKE_CASE )[0]
a__ = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape ,__SCREAMING_SNAKE_CASE )
a__ = torch.tensor(
[[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__SCREAMING_SNAKE_CASE ,atol=1E-4 ) )
@require_torch
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :int ) -> Union[str, Any]:
a__ = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
a__ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
a__ = model(__SCREAMING_SNAKE_CASE )[0]
a__ = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape ,__SCREAMING_SNAKE_CASE )
a__ = torch.tensor(
[[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__SCREAMING_SNAKE_CASE ,atol=1E-4 ) )
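# Standalone sketch of the cache-equivalence check used in
# create_and_check_decoder_model_past_large_inputs above: hidden states for the
# newly appended tokens must match with and without past_key_values. Names are
# illustrative.
import torch
def _cache_outputs_match(no_past, with_past, n_new: int) -> bool:
    return torch.allclose(no_past[:, -n_new:], with_past[:, -n_new:], atol=1e-3)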
| 335 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
    '''simple docstring'''
    def __init__( self , domain ):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag( self , tag , attrs ):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name( url: str ) -> str:
    '''simple docstring'''
    return ".".join(get_sub_domain_name(url ).split(""".""" )[-2:] )
def get_sub_domain_name( url: str ) -> str:
    '''simple docstring'''
    return parse.urlparse(url ).netloc
def emails_from_url( url: str = "https://github.com" ) -> list[str]:
    '''simple docstring'''
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 0 |
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A__ : Tuple = get_logger(__name__)
class lowercase :
__a = """dummy_data"""
__a = """datasets"""
__a = False
    def __init__( self , dataset_name , config , version , cache_dir = None , use_local_dummy_data = False , load_existing_dummy_data = True , download_callbacks = None , ):
        """simple docstring"""
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def lowercase_ ( self ):
"""simple docstring"""
if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
return self._dummy_file
@property
def lowercase_ ( self ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def lowercase_ ( self ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCAmelCase__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def lowercase_ ( self ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowercase_ ( self ):
"""simple docstring"""
if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def lowercase_ ( self ):
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCAmelCase__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCAmelCase__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return path
def lowercase_ ( self ):
"""simple docstring"""
return {}
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
lowerCAmelCase__ : List[Any] = single_urls
lowerCAmelCase__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
lowerCAmelCase__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCAmelCase__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCAmelCase__ : Tuple = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
lowerCAmelCase__ : List[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCAmelCase__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowercase_ ( self ):
"""simple docstring"""
pass
def lowercase_ ( self ):
"""simple docstring"""
pass
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def _iter_archive_members(SCREAMING_SNAKE_CASE__ ):
# this preserves the order of the members inside the ZIP archive
lowerCAmelCase__ : List[str] = Path(self.dummy_file ).parent
lowerCAmelCase__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCAmelCase__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = Path(__SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open('''rb''' )
    def lowercase_ ( self , paths ):
        """simple docstring"""
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith(('''.''', '''__''') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith(('''.''', '''__''') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith(('''.''', '''__''') ):
                            continue
                        yield os.path.join(dirpath , filename )
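# --- Illustrative sketch (editor's addition, standalone): two tricks used by the
# dummy download manager above. Dummy file names come from the last URL path
# segment encoded with quote_plus, and sharded URL lists such as
# `data.txt-00001-of-00300` are detected with a regex so a single dummy shard can
# stand in for all of them. The URL below is made up.
import re
import urllib.parse

example_url = "https://example.com/dl?file=train.txt"
print(urllib.parse.quote_plus(example_url.split("/")[-1]))  # dl%3Ffile%3Dtrain.txt

shard_urls = ["data.txt-00001-of-00300", "data.txt-00002-of-00300"]
print(all(bool(re.findall(r"[0-9]{3,}-of-[0-9]{3,}", url)) for url in shard_urls))  # True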
'''simple docstring'''
def is_palindrome(head) -> bool:
    '''Check whether a linked list is a palindrome by reversing its second half.'''
    if not head:
        return True
    # split the list to two parts with fast/slow pointers
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
return True
def is_palindrome_stack(head) -> bool:
    '''Check whether a linked list is a palindrome using a stack of the second half.'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head) -> bool:
    '''Check whether a linked list is a palindrome by hashing each value's positions.'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
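# --- Illustrative sketch (editor's addition): a minimal singly linked list node
# and a quick check of the three variants above. The original file never defines
# a node class, so this ListNode is an assumption.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = tail = None
    for val in values:
        node = ListNode(val)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    for values in ([1, 2, 3, 2, 1], [1, 2, 3]):
        # is_palindrome mutates the list, so rebuild it before each call
        print(
            is_palindrome(build_list(values)),
            is_palindrome_stack(build_list(values)),
            is_palindrome_dict(build_list(values)),
        )
    # expected: True True True, then False False False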
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''add_prefix_space''': True}
    test_seq2seq = False
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def __lowerCAmelCase ( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def __lowerCAmelCase ( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : Tuple = """lower newer"""
__a : Optional[Any] = """lower newer"""
return input_text, output_text
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
        tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = """lower newer"""
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def __lowerCAmelCase ( self , *args , **kwargs ):
'''simple docstring'''
pass
    def __lowerCAmelCase ( self , max_length=1_5 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
        # Simple input
        s = """This is a simple input"""
        s2 = ["""This is a simple input looooooooong""", """This is a simple input"""]
        p = ("""This is a simple input""", """This is a pair""")
        p2 = [
            ("""This is a simple input loooooong""", """This is a simple input"""),
            ("""This is a simple pair loooooong""", """This is a simple pair"""),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding='max_length' , max_length=3_0 , return_tensors='np' )
        out_sa = tokenizer(s2 , padding=True , truncate=True , return_tensors='np' )
        out_p = tokenizer(*p , padding='max_length' , max_length=6_0 , return_tensors='np' )
        out_pa = tokenizer(p2 , padding=True , truncate=True , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : str = """$$$"""
__a : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__SCREAMING_SNAKE_CASE , add_bos_token=__SCREAMING_SNAKE_CASE )
__a : int = """This is a simple input"""
__a : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
__a : List[str] = tokenizer.bos_token_id
__a : List[str] = tokenizer(__SCREAMING_SNAKE_CASE )
__a : int = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , __SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__a : str = tokenizer.decode(out_s.input_ids )
__a : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
        text = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
        expected_truncated_text = """\nif len_a > len_b: result = a\nelse: result = b"""
        input_ids = tokenizer.encode(text )
        truncate_before_pattern = ["""^#""", re.escape('<|endoftext|>' ), """^'''""", """^\"\"\"""", """\n\n\n"""]
        decoded_text = tokenizer.decode(input_ids , truncate_before_pattern=truncate_before_pattern )
        self.assertEqual(decoded_text , expected_truncated_text )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
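# --- Illustrative sketch (editor's addition, standalone): the toy vocabulary in
# setUp assigns ids by list position, so the expected ids in test_full_tokenizer
# can be recomputed by hand. "\u0120" is the byte-level BPE marker for a leading
# space.
toy_vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
    "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
]
token_to_id = {token: idx for idx, token in enumerate(toy_vocab)}
tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er", "<unk>"]
print([token_to_id[t] for t in tokens])  # [14, 15, 10, 9, 3, 2, 15, 19]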
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def __UpperCamelCase ( self ):
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def __UpperCamelCase ( self ):
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 1_4) , batch.input_ids.shape )
        self.assertEqual((2, 1_4) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=1_0 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
        inputs = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
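# --- Illustrative sketch (editor's addition, standalone): a plain-Python
# approximation of what MBart's shift_tokens_right does. The last non-pad token
# (the language code that MBart places after </s>) is wrapped around to position
# 0 so it becomes the decoder start token; everything else shifts right by one.
# The real helper operates on torch tensors.
def shift_tokens_right_sketch(input_ids, pad_token_id):
    shifted = []
    for row in input_ids:
        last_non_pad = max(i for i, tok in enumerate(row) if tok != pad_token_id)
        shifted.append([row[last_non_pad]] + row[:-1])
    return shifted

# row = token "hi" (100), </s> (2), ro_RO (250020), pad (1); the final pad is shifted off
print(shift_tokens_right_sketch([[100, 2, 250020, 1]], pad_token_id=1))
# [[250020, 100, 2, 250020]]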
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self , parent ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_saved_model_creation_extended(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache' ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1' )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["""encoder_hidden_states"""]
                    output_attentions = outputs["""encoder_attentions"""]
                else:
                    output_hidden_states = outputs["""hidden_states"""]
                    output_attentions = outputs["""attentions"""]
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained(self ):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        self.assertIsNotNone(model )
    def test_attention_outputs(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm(self ):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
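# --- Illustrative sketch (editor's addition, assumes tensorflow is installed):
# the expand_dims + tile pattern used for multiple-choice inputs above turns a
# (batch, seq_len) tensor into (batch, num_choices, seq_len) by repeating every
# example once per answer choice.
import tensorflow as tf

batch, seq_len, num_choices = 2, 7, 4
ids = tf.zeros((batch, seq_len), dtype=tf.int32)
tiled = tf.tile(tf.expand_dims(ids, 1), (1, num_choices, 1))
print(tiled.shape)  # (2, 4, 7)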
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=32 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=32 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
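# --- Illustrative sketch (editor's addition): a simplified re-implementation of
# what get_aligned_output_features_output_indices does for a backbone config.
# Whichever of out_features / out_indices is missing is derived from the other
# against stage_names; if both are missing, the last stage is used. This is an
# approximation only; the real helper also validates its inputs.
def align_outputs_sketch(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]
        out_indices = [len(stage_names) - 1]
    elif out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    elif out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices

stages = ["stem", "stage1", "stage2", "stage3", "stage4"]
print(align_outputs_sketch(None, None, stages))        # (['stage4'], [4])
print(align_outputs_sketch(["stage2"], None, stages))  # (['stage2'], [2])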
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator , batch_size = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue' , 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
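# --- Illustrative sketch (editor's addition): padding lengths to a multiple of
# 8 (fp16/bf16) or 16 (fp8) keeps tensor shapes friendly for tensor cores. The
# round-up that tokenizer.pad performs amounts to this arithmetic:
def round_up(length, multiple):
    return ((length + multiple - 1) // multiple) * multiple

print(round_up(37, 8))   # 40
print(round_up(37, 16))  # 48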
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config , args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue' , 'mrpc')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric)
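# --- Illustrative sketch (editor's addition): the last-batch truncation above in
# miniature. In distributed eval every process receives the same number of
# samples, so the final gathered batch can contain duplicated padding examples;
# slicing with `dataset_size - samples_seen` keeps only the real ones. The
# numbers below are made up.
dataset_size, samples_seen = 100, 96
gathered_last_batch = list(range(12))  # 12 gathered, but only 4 real samples remain
kept = gathered_last_batch[: dataset_size - samples_seen]
print(len(kept))  # 4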
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args)
if __name__ == "__main__":
main()
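# --- Usage note (editor's addition): a typical invocation, assuming the
# `accelerate` CLI is installed and configured:
#   accelerate launch this_script.py --mixed_precision fp16
#   python this_script.py --cpu
# The script name is illustrative; use whatever this file is saved as.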
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
# we split up the fused qkv matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
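# Illustrative sketch (added; not part of the original script): how a fused qkv
# projection splits into equal query/key/value blocks. The sizes are hypothetical.
def _demo_split_qkv(hidden_size=4):
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # rows: [q | k | v]
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : 2 * hidden_size, :]
    value = in_proj_weight[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)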
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
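# For example, with one of the pairs produced by create_rename_keys above:
#   rename_key(state_dict, "blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")
# moves the timm weight under the key the HuggingFace model expects.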
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights to our ViT hybrid structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
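# Example invocation (illustrative; the script and folder names are placeholders):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base --push_to_hub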
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 | 0 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    Binarize a grayscale PIL image around its global mean: every pixel brighter
    than the mean becomes white (255), everything else becomes black (0).
    """
    width, height = image.size
    mean = 0
    pixels = image.load()
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
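# Alternative sketch (added for illustration; not in the original): the same
# thresholding expressed with NumPy, assuming numpy is available.
def mean_threshold_np(image: Image) -> Image:
    import numpy as np

    arr = np.asarray(image, dtype=np.uint8)
    binary = np.where(arr > arr.mean(), 255, 0).astype(np.uint8)
    return Image.fromarray(binary)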
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 410 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
        snake_case__ : int = torch.nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
        snake_case__ : Optional[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
        snake_case__ : int = nn.Conv2d(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
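# Design note (added): with gradient checkpointing enabled, each block's activations
# are recomputed during the backward pass instead of being stored, trading compute
# for memory. The torch >= 1.11 branch passes use_reentrant explicitly because the
# non-reentrant checkpointing implementation is only available from that release.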
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
        snake_case__ : Optional[Any] = nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
        snake_case__ : Tuple = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
        snake_case__ : Union[str, Any] = nn.Conv2d(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
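        # (added note) torch.cdist gives the pairwise Euclidean distances between each
        # flattened latent vector and every codebook row, so the argmin over dim=1
        # selects the index of the nearest codebook entry per latent.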
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
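# Illustrative sketch (added; not part of the library): the nearest-neighbour lookup
# and straight-through estimator used above, in isolation. `z` is assumed to be a
# channels-last tensor whose trailing dimension matches the codebook width.
def _demo_straight_through(z, codebook):
    flat = z.reshape(-1, codebook.shape[1])
    indices = torch.argmin(torch.cdist(flat, codebook), dim=1)
    z_q = codebook[indices].view(z.shape)
    # forward pass uses the quantized values; backward copies gradients straight to z
    z_q = z + (z_q - z).detach()
    return z_q, indices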
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
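# Minimal usage sketch (added): sampling with the reparameterization trick. The
# tensor shape is hypothetical -- `parameters` stacks 4 mean channels on top of
# 4 log-variance channels.
def _demo_reparameterize():
    parameters = torch.randn(1, 8, 4, 4)
    dist = DiagonalGaussianDistribution(parameters)
    latent = dist.sample()  # mean + std * eps, differentiable w.r.t. parameters
    kl = dist.kl()  # KL against the unit Gaussian prior, one value per batch item
    return latent, kl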
| 38 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
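# Example invocation (illustrative; the file names and task are placeholders):
#   transformers-cli run --task sentiment-analysis --input reviews.csv \
#       --column text --format csv --output predictions.json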
| 197 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
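        # Worked example (added): with the defaults above, image_size=32 and
        # patch_size=16 give num_patches = (32 // 16) ** 2 = 4, hence seq_length = 5.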
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
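# Illustrative helper (added; not part of the original test): one common way to view
# `predicted_depth` is to resize it to the input resolution and rescale to 8-bit.
# The helper name and the target size below are hypothetical.
def _visualize_depth(predicted_depth, size=(384, 384)):
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=size, mode="bicubic", align_corners=False
    ).squeeze()
    depth = (prediction * 255 / prediction.max()).cpu().numpy().astype("uint8")
    return Image.fromarray(depth)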
| 38 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 42
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : int=3 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Tuple=("DownEncoderBlock2D",) , UpperCamelCase__ : Dict=(64,) , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : List[Any]="silu" , UpperCamelCase__ : int=True , ):
super().__init__()
A = layers_per_block
        A = torch.nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
A = output_channel
A = block_out_channels[i]
A = i == len(__SCREAMING_SNAKE_CASE ) - 1
A = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
        A = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
        A = nn.Conv2d(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
A = False
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
A = x
A = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : List[Any] ):
def custom_forward(*UpperCamelCase__ : Union[str, Any] ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
A = down_block(__SCREAMING_SNAKE_CASE )
# middle
A = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
A = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
A = self.conv_act(__SCREAMING_SNAKE_CASE )
A = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[str]=(64,) , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Union[str, Any]="silu" , UpperCamelCase__ : Tuple="group" , ):
super().__init__()
A = layers_per_block
        A = nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == """spatial""" else None
# mid
        A = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
A = list(reversed(__SCREAMING_SNAKE_CASE ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(__SCREAMING_SNAKE_CASE ) - 1
A = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
A = nn.SiLU()
        A = nn.Conv2d(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
A = False
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=None ):
A = z
A = self.conv_in(__SCREAMING_SNAKE_CASE )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : List[Any] ):
def custom_forward(*UpperCamelCase__ : Any ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
A = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
A = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
A = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
A = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
A = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
A = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
A = self.conv_act(__SCREAMING_SNAKE_CASE )
A = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]="random" , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[int]=True ):
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A = n_e
A = sane_index_shape
def UpperCamelCase ( self : str , UpperCamelCase__ : Tuple ):
A = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
A = inds.reshape(ishape[0] , -1 )
A = self.used.to(__SCREAMING_SNAKE_CASE )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any ):
A = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
A = inds.reshape(ishape[0] , -1 )
A = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] ):
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 , 2 , 3 , 1 ).contiguous()
A = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
A = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A = self.remap_to_used(__SCREAMING_SNAKE_CASE )
A = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] , -1 ) # add batch axis
A = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
A = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
A = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]=False ):
A = parameters
A = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
A = torch.clamp(self.logvar , -30.0 , 20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Optional[Any] = None ):
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] ):
return self.mean
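    # Note (added): the closed forms above are the standard diagonal-Gaussian identities.
    # For q = N(mu, sigma^2) against the unit Gaussian prior,
    #     KL(q || N(0, I)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2)
    # and the negative log-likelihood of a sample x is
    #     NLL(x) = 0.5 * sum(log(2 * pi) + log sigma^2 + (x - mu)^2 / sigma^2),
    # which is exactly what the KL and NLL methods compute with logvar = log sigma^2.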
| 699 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
    iam_client = boto3.client("iam")
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , eca_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
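# Note: the SageMakerConfig built above is what `accelerate config` persists to disk
# and what `accelerate launch` later reads back when submitting the training job.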
| 38 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
# fmt: off
__lowercase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__lowercase = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__lowercase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__lowercase = {"""unk_token""": """<unk>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__SCREAMING_SNAKE_CASE ) )
__lowercase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__lowercase = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self : Any , **lowerCamelCase : int ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self : Optional[Any] , **lowerCamelCase : Tuple ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self : Tuple , **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
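        # Build a small batch of random uint8 RGB arrays and wrap them as PIL Images.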
        __lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__lowercase = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = self.get_image_processor()
__lowercase = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
__lowercase = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowercase = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
__lowercase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="np" )
__lowercase = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__lowercase = """lower newer"""
__lowercase = processor(text=__SCREAMING_SNAKE_CASE )
__lowercase = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__lowercase = """lower newer"""
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__lowercase = self.prepare_image_inputs()
__lowercase = self.prepare_image_inputs()
__lowercase = processor(images=__SCREAMING_SNAKE_CASE , visual_prompt=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.batch_decode(__SCREAMING_SNAKE_CASE )
__lowercase = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
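# Usage sketch (checkpoint name assumed; "CIDAS/clipseg-rd64-refined" is the public CLIPSeg checkpoint):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   out = processor(text=["a cat"], images=image, return_tensors="pt")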
| 402 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
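# Scrapes Amazon.in search results for a product and tabulates title, link, price,
# rating, MRP, and discount. Amazon's markup changes often, so the CSS class
# selectors below may need updating.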
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon.in search results for `product` and return them as a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    # Spoof a desktop browser User-Agent; Amazon rejects the default python-requests one.
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Percentage discount = (MRP - current price) / MRP * 100
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] == "", "Current Price of the product"
        ] = " "
        data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 38 | 0 |
class SubArray:
    def __init__(self, arr: str) -> None:
        # Convert the comma-separated input string into a list of number tokens.
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        # Kadane-style dynamic programming:
        #   sum_value[i] = best sum of a sub-array that ends exactly at index i
        #   rear[i]      = best sub-array sum seen anywhere in array[0..i]
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    result = array.solve_sub_array()
    print(("the result is:", result))
| 393 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
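        # byte_encoder maps the raw space byte (0x20) to the byte-level BPE symbol "Ġ",
        # which is what the checks below look for when a prefix space is added.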
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 38 | 0 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Randomized in-place quicksort that returns the number of comparisons made."""
    count = 0
    if start < end:
        # Move a randomly chosen pivot to the end of the current slice.
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Lomuto-style partition; returns (final pivot index, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
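# Randomized quicksort does about 2*n*ln(n) comparisons on average (~920 for n = 100),
# so z will vary from run to run but should land in that ballpark.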
| 434 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ResNet backbone/model."""

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
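# Minimal sketch of how the two classes pair up (the defaults mirror microsoft/resnet-50):
#   config = ResNetConfig()
#   onnx_config = ResNetOnnxConfig(config)
#   dict(onnx_config.inputs)  # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}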
| 38 | 0 |
from __future__ import annotations
snake_case : str = "Muhammad Umer Farooq"
snake_case : Optional[Any] = "MIT"
snake_case : int = "1.0.0"
snake_case : int = "Muhammad Umer Farooq"
snake_case : int = "[email protected]"
snake_case : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
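# Parser records every absolute anchor URL found on a page; emails_from_url (below)
# then fetches each link and regex-matches addresses on the crawled domain.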
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
| 335 |
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 38 | 0 |
import os
import sys
import unittest
A__ : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
A__ : Dict = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
A__ : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
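# Representative test files used as fixtures for the mapping utilities checked below.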
class lowercase ( unittest.TestCase ):
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = get_test_to_tester_mapping(__SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = get_test_to_tester_mapping(__SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = {"""BertModelTest""": """BertModelTester"""}
lowerCAmelCase__ : List[str] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Dict = get_model_to_test_mapping(__SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = get_model_to_test_mapping(__SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
lowerCAmelCase__ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = get_model_to_tester_mapping(__SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = get_model_to_tester_mapping(__SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
lowerCAmelCase__ : Optional[Any] = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
| 233 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
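        # Reconstruct the exact ordered stream of callback events the Trainer should
        # fire for this run's logging/eval/save settings.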
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
| 38 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # When used inside `as_target_processor`, forward everything to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # When used inside `as_target_processor`, forward everything to the tokenizer's pad.
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
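# Usage sketch (assumes the public checkpoint "facebook/wav2vec2-base-960h" is reachable):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids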
| 47 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 38 | 0 |
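# A short sketch of the preprocessing the DPTImageProcessor tests above verify:
# a single PIL image becomes a (1, num_channels, height, width) tensor; the
# 18x18 target size mirrors the tester's default.
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

image_processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])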
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_a : Union[str, Any] = False, False, False
@dataclass
class _lowercase :
_SCREAMING_SNAKE_CASE : Optional[Any] = None
_SCREAMING_SNAKE_CASE : Optional[Any] = True
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Any = None
# Automatically constructed
_SCREAMING_SNAKE_CASE : Optional[Any] = "dict"
_SCREAMING_SNAKE_CASE : Tuple = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_SCREAMING_SNAKE_CASE : List[str] = field(default="Audio" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self : Any ) -> int:
return self.pa_type
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return {"bytes": None, "path": value}
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__snake_case = BytesIO()
sf.write(__SCREAMING_SNAKE_CASE , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
                    # To convert raw "PCM" bytes to "WAV" bytes, the sampling rate must be known
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
                    # If we already have the PCM bytes, we don't need to read the file again (just use them directly)
__snake_case = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 3_2767
else:
__snake_case = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 3_2767
__snake_case = BytesIO(bytes() )
sf.write(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any = None ) -> Tuple:
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
__snake_case = (value["""path"""], BytesIO(value['bytes'] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
__snake_case = xsplitext(__SCREAMING_SNAKE_CASE )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31. '
                'You can try to update the `soundfile` python library: `pip install "soundfile>=0.12.1"`.' )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0. '
                'You can try to update the `soundfile` python library: `pip install "soundfile>=0.12.1"`.' )
if file is None:
__snake_case = token_per_repo_id or {}
__snake_case = path.split('::' )[-1]
try:
__snake_case = string_to_dict(__SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )["""repo_id"""]
__snake_case = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__snake_case = None
with xopen(__SCREAMING_SNAKE_CASE , 'rb' , use_auth_token=__SCREAMING_SNAKE_CASE ) as f:
__snake_case = sf.read(__SCREAMING_SNAKE_CASE )
else:
__snake_case = sf.read(__SCREAMING_SNAKE_CASE )
__snake_case = array.T
if self.mono:
__snake_case = librosa.to_mono(__SCREAMING_SNAKE_CASE )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__snake_case = librosa.resample(__SCREAMING_SNAKE_CASE , orig_sr=__SCREAMING_SNAKE_CASE , target_sr=self.sampling_rate )
__snake_case = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def a ( self : Any ) -> Union[str, Any]:
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]:
if pa.types.is_string(storage.type ):
__snake_case = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
__snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__snake_case = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
__snake_case = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
__snake_case = pa.array([Audio().encode_example(__SCREAMING_SNAKE_CASE ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
__snake_case = storage.field('bytes' )
else:
__snake_case = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
__snake_case = storage.field('path' )
else:
__snake_case = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
__snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE_ : str ):
with xopen(__SCREAMING_SNAKE_CASE , 'rb' ) as f:
__snake_case = f.read()
return bytes_
__snake_case = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__snake_case = pa.array(
[os.path.basename(__SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
__snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
| 56 |
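# A minimal sketch of the `encode_example` path above for an
# {"array", "sampling_rate"} input: the array is serialized to WAV bytes in
# memory with soundfile, mirroring the method's BytesIO buffer.
from io import BytesIO

import numpy as np
import soundfile as sf

array = np.zeros(16_000, dtype=np.float32)  # one second of silence
buffer = BytesIO()
sf.write(buffer, array, 16_000, format="wav")
encoded = {"bytes": buffer.getvalue(), "path": None}
print(len(encoded["bytes"]))  # size of the in-memory WAV file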
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 38 | 0 |
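# The spatial-shape assertion in the model test above applies the standard
# convolution output-size formula, floor((size + 2*padding - kernel) / stride) + 1,
# once per stage. A stand-alone sketch with the tester's default patch settings:
from math import floor

def conv_output_size(size: int, padding: int, kernel: int, stride: int) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1

size = 64  # the tester's default image_size
for kernel, stride, padding in [(7, 4, 2), (3, 2, 1), (3, 2, 1)]:
    size = conv_output_size(size, padding, kernel, stride)
    print(size)  # 16, then 8, then 4 across the three CvT stages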
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=0 ) -> Optional[Any]:
UpperCamelCase_ = 1.0 if scale is None else scale
UpperCamelCase_ = 0.0 if loc is None else loc
super().__init__(__SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__SCREAMING_SNAKE_CASE )] )
@property
def _UpperCAmelCase ( self ) -> List[str]:
return self.base_dist.mean * self.scale + self.loc
@property
def _UpperCAmelCase ( self ) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def _UpperCAmelCase ( self ) -> Dict:
return self.variance.sqrt()
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = args_dim
UpperCamelCase_ = nn.ModuleList([nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] )
UpperCamelCase_ = domain_map
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> str:
UpperCamelCase_ = [proj(__SCREAMING_SNAKE_CASE ) for proj in self.proj]
return self.domain_map(*__SCREAMING_SNAKE_CASE )
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ) -> str:
super().__init__()
UpperCamelCase_ = function
def _UpperCAmelCase ( self , _UpperCAmelCase , *_UpperCAmelCase ) -> int:
return self.function(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
class _a :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , _UpperCAmelCase = 1 ) -> Tuple:
UpperCamelCase_ = dim
UpperCamelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple:
if self.dim == 1:
return self.distribution_class(*__SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(*__SCREAMING_SNAKE_CASE ) , 1 )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , ) -> Optional[int]:
UpperCamelCase_ = self._base_distribution(__SCREAMING_SNAKE_CASE )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__SCREAMING_SNAKE_CASE , loc=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , event_dim=self.event_dim )
@property
def _UpperCAmelCase ( self ) -> List[str]:
return () if self.dim == 1 else (self.dim,)
@property
def _UpperCAmelCase ( self ) -> str:
return len(self.event_shape )
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return 0.0
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Union[str, Any]:
return ParameterProjection(
in_features=__SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _UpperCAmelCase ( self , *_UpperCAmelCase ) -> int:
raise NotImplementedError()
@staticmethod
def _UpperCAmelCase ( _UpperCAmelCase ) -> Optional[int]:
return (x + torch.sqrt(torch.square(__SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0
class _a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A_ = {"""df""": 1, """loc""": 1, """scale""": 1}
A_ = StudentT
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = cls.squareplus(__SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCamelCase_ = 2.0 + cls.squareplus(__SCREAMING_SNAKE_CASE )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A_ = {"""loc""": 1, """scale""": 1}
A_ = Normal
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = cls.squareplus(__SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A_ = {"""total_count""": 1, """logits""": 1}
A_ = NegativeBinomial
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
UpperCamelCase_ = cls.squareplus(__SCREAMING_SNAKE_CASE )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(total_count=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE ) , 1 )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> List[str]:
UpperCamelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 23 |
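# The projection heads above map unconstrained network outputs to valid
# distribution parameters with `squareplus`, (x + sqrt(x^2 + 4)) / 2, a smooth,
# strictly positive function similar to softplus. A small sketch of that mapping:
import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

x = torch.tensor([-5.0, 0.0, 5.0])
print(squareplus(x))  # tensor([0.1926, 1.0000, 5.1926]), all strictly positive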
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        # For consistency across the different places where DisjunctiveConstraint is used,
        # dc.token_ids is a list of integers, and it may only be initialized from integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2], which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believed that [1,2,3] does fulfill the constraint, the algorithm
        # would necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 38 | 0 |
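# The DisjunctiveConstraint exercised above can be pictured as walking a trie of
# the allowed token-id sequences, with the constraint completed once a leaf is
# reached. A minimal sketch of that intuition (not the library's implementation):
def build_trie(sequences):
    trie = {}
    for seq in sequences:
        node = trie
        for token in seq:
            node = node.setdefault(token, {})
    return trie

trie = build_trie([[1, 2, 3], [1, 2, 4]])
node = trie
for token in (1, 2, 4):
    node = node[token]
print(node == {})  # True: an empty node marks a completed sequence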
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCAmelCase__ : int = threading.Lock()
UpperCAmelCase__ : Optional[logging.Handler] = None
UpperCAmelCase__ : str = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
UpperCAmelCase__ : Dict = logging.WARNING
UpperCAmelCase__ : Tuple = True
def _lowercase ( ) -> int:
UpperCamelCase__ : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __SCREAMING_SNAKE_CASE )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def _lowercase ( ) -> str:
return __name__.split('.' )[0]
def _lowercase ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def _lowercase ( ) -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
UpperCamelCase__ : int = logging.StreamHandler() # Set sys.stderr as stream.
UpperCamelCase__ : Optional[int] = sys.stderr.flush
# Apply our default configuration to the library root logger.
UpperCamelCase__ : Dict = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
UpperCamelCase__ : List[str] = False
def _lowercase ( ) -> None:
global _default_handler
with _lock:
if not _default_handler:
return
UpperCamelCase__ : Optional[Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
UpperCamelCase__ : str = None
def _lowercase ( ) -> List[str]:
return log_levels
def _lowercase ( __SCREAMING_SNAKE_CASE = None ) -> logging.Logger:
if name is None:
UpperCamelCase__ : Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__SCREAMING_SNAKE_CASE )
def _lowercase ( ) -> int:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _lowercase ( __SCREAMING_SNAKE_CASE ) -> None:
_configure_library_root_logger()
_get_library_root_logger().setLevel(__SCREAMING_SNAKE_CASE )
def _lowercase ( ) -> List[Any]:
return set_verbosity(__SCREAMING_SNAKE_CASE )
def _lowercase ( ) -> List[str]:
return set_verbosity(__SCREAMING_SNAKE_CASE )
def _lowercase ( ) -> Optional[Any]:
return set_verbosity(__SCREAMING_SNAKE_CASE )
def _lowercase ( ) -> Any:
return set_verbosity(__SCREAMING_SNAKE_CASE )
def _lowercase ( ) -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _lowercase ( ) -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _lowercase ( __SCREAMING_SNAKE_CASE ) -> None:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__SCREAMING_SNAKE_CASE )
def _lowercase ( __SCREAMING_SNAKE_CASE ) -> None:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__SCREAMING_SNAKE_CASE )
def _lowercase ( ) -> None:
_configure_library_root_logger()
UpperCamelCase__ : List[str] = False
def _lowercase ( ) -> None:
_configure_library_root_logger()
UpperCamelCase__ : str = True
def _lowercase ( ) -> None:
UpperCamelCase__ : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
UpperCamelCase__ : Optional[int] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__SCREAMING_SNAKE_CASE )
def _lowercase ( ) -> None:
UpperCamelCase__ : Any = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Any:
UpperCamelCase__ : Dict = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __SCREAMING_SNAKE_CASE )
if no_advisory_warnings:
return
self.warning(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : List[str] = warning_advice
@functools.lru_cache(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Dict:
self.warning(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Dict = warning_once
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , *UpperCamelCase , **UpperCamelCase) -> Optional[Any]: # pylint: disable=unused-argument
UpperCamelCase__ : List[str] = args[0] if args else None
def __iter__( self) -> Union[str, Any]:
return iter(self._iterator)
def __getattr__( self , UpperCamelCase) -> Dict:
def empty_fn(*UpperCamelCase , **UpperCamelCase): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self) -> Tuple:
return self
def __exit__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase) -> Dict:
return
class UpperCamelCase_ :
'''simple docstring'''
def __call__( self , *UpperCamelCase , **UpperCamelCase) -> int:
if _tqdm_active:
return tqdm_lib.tqdm(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
else:
return EmptyTqdm(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( self , *UpperCamelCase , **UpperCamelCase) -> Optional[Any]:
UpperCamelCase__ : str = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( self) -> Optional[Any]:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase__ : List[str] = _tqdm_cls()
def _lowercase ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def _lowercase ( ) -> List[Any]:
global _tqdm_active
UpperCamelCase__ : List[str] = True
hf_hub_utils.enable_progress_bars()
def _lowercase ( ) -> Any:
global _tqdm_active
UpperCamelCase__ : List[str] = False
hf_hub_utils.disable_progress_bars()
| 410 |
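# A short sketch of the intended use of the logging utilities above, assuming
# they are importable as `transformers.utils.logging`: fetch the shared library
# logger and raise its verbosity.
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
logger.info("verbosity is now INFO")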
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
| 38 | 0 |
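# A minimal sketch of instantiating the config defined above; the values printed
# are the defaults from the signature.
from transformers import SegformerConfig

config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
print(config.model_type)    # "segformer"
print(config.hidden_sizes)  # [32, 64, 160, 256]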
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Dict=30 , __SCREAMING_SNAKE_CASE : Optional[int]=4_00 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[str]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : int=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=1 / 2_55 , __SCREAMING_SNAKE_CASE : Tuple=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def _UpperCAmelCase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _UpperCAmelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
if not batched:
__a = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ):
__a = image.size
else:
__a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["""shortest_edge"""]
elif w > h:
__a = self.size["""shortest_edge"""]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["""shortest_edge"""]
__a = self.size["""shortest_edge"""]
else:
__a = []
for image in image_inputs:
__a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
__a = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = DetaImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self : int ):
__a = DetaImageProcessingTester(self )
@property
def _UpperCAmelCase ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self : Any ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_rescale" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_pad" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "size" ) )
def _UpperCAmelCase ( self : List[str] ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : int ):
pass
def _UpperCAmelCase ( self : str ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self : str ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
__a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self : str ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
__a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCAmelCase ( self : Optional[Any] ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__a = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __SCREAMING_SNAKE_CASE ) )
# verify orig_size
__a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __SCREAMING_SNAKE_CASE ) )
# verify size
__a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __SCREAMING_SNAKE_CASE ) )
@slow
def _UpperCAmelCase ( self : Optional[int] ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __SCREAMING_SNAKE_CASE ) )
# verify masks
__a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __SCREAMING_SNAKE_CASE )
# verify orig_size
__a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __SCREAMING_SNAKE_CASE ) )
# verify size
__a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __SCREAMING_SNAKE_CASE ) )
| 197 |
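# The `get_expected_values` helper above resizes so the shorter side matches
# size["shortest_edge"] while preserving aspect ratio. A stand-alone sketch of
# that arithmetic:
def shortest_edge_resize(width: int, height: int, shortest_edge: int):
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge

print(shortest_edge_resize(400, 300, 18))  # (24, 18): width scaled, height pinned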
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
        snake_case__ , snake_case__ = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
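# Example invocation (run id, token, and paths are placeholders; the script
# name is an assumption):
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ./ci_errors \
#       --token ghp_xxxxxxxx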
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
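# Sketch of what the _LazyModule registration above buys: heavy submodules
# are only imported on first attribute access (illustrative usage):
#   import transformers.models.lxmert as lxmert  # cheap, nothing heavy loaded
#   config = lxmert.LxmertConfig()               # pulls in configuration_lxmert
#   model = lxmert.LxmertModel(config)           # pulls in modeling_lxmert + torch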
| 699 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
        # return the full path if it's a directory
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
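# Illustrative effect of create_dummy_data_dict above (made-up values): each
# URL maps to a path under the dummy-data root, with the final URL component
# percent-encoded by urllib.parse.quote_plus, e.g.
#   {"train": "https://host/data/train.csv?dl=1"}
#   -> {"train": os.path.join(path_to_dummy_data, "train.csv%3Fdl%3D1")}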
| 38 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F"""Building PyTorch model from configuration: {config}""" )
__lowercase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
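# Example invocation (paths are placeholders; the script name is assumed):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin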
| 402 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
A_ = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 393 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
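# Minimal sketch of how the converters above plug into the prompt helper
# (the argument wiring follows the upstream accelerate CLI and is an
# assumption here):
#   use_cpu = _ask_field(
#       "Do you want to run your training on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )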
| 38 | 0 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__snake_case = CustomTokenizer
pass
| 434 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__magic_name__ ) / len(__magic_name__ )
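# Doctest-style behavior the function above is meant to satisfy (illustrative;
# `mean` stands in for the obfuscated def):
#   >>> mean([3.0, 5.0, 7.0])
#   5.0
#   >>> mean([])
#   Traceback (most recent call last):
#       ...
#   ValueError: List is empty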
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 335 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : list[str] = []
snake_case__ : List[Any] = domain
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor just '#', process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ : str = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__magic_name__ ).split(""".""" )[-2:] )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return parse.urlparse(__magic_name__ ).netloc
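# Quick illustration of the two helpers above (values as the stdlib returns
# them):
#   parse.urlparse("https://sub.example.com/path").netloc -> "sub.example.com"
#   ".".join("sub.example.com".split(".")[-2:])           -> "example.com"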
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowercase ( __SCREAMING_SNAKE_CASE ):
__a = """segformer"""
def __init__( self , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE__=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE__=[32, 64, 160, 256] , SCREAMING_SNAKE_CASE__=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE__=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE__=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE__=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=1E-6 , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=255 , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , __SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Dict = num_channels
lowerCAmelCase__ : Optional[Any] = num_encoder_blocks
lowerCAmelCase__ : Any = depths
lowerCAmelCase__ : Optional[int] = sr_ratios
lowerCAmelCase__ : Tuple = hidden_sizes
lowerCAmelCase__ : List[str] = patch_sizes
lowerCAmelCase__ : str = strides
lowerCAmelCase__ : Optional[int] = mlp_ratios
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Optional[int] = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = classifier_dropout_prob
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : int = layer_norm_eps
lowerCAmelCase__ : List[Any] = decoder_hidden_size
lowerCAmelCase__ : List[Any] = kwargs.get('''reshape_last_stage''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = semantic_loss_ignore_index
class lowercase ( __SCREAMING_SNAKE_CASE ):
__a = version.parse("""1.11""" )
@property
def lowercase_ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase_ ( self ):
"""simple docstring"""
return 1E-4
@property
def lowercase_ ( self ):
"""simple docstring"""
return 12
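# Sketch of how the ONNX config above is typically consumed (both classes are
# obfuscated to `lowercase` here; the upstream property names
# atol_for_validation and default_onnx_opset are an assumption):
#   config = SegformerConfig()                  # defaults as in __init__ above
#   onnx_config = SegformerOnnxConfig(config)   # the second class above
#   list(onnx_config.inputs)                    # -> ["pixel_values"]
#   onnx_config.atol_for_validation             # -> 1e-4
#   onnx_config.default_onnx_opset              # -> 12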
| 233 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
    snake_case__ , snake_case__ = head.next, head
while fast and fast.next:
snake_case__ : Any = fast.next.next
snake_case__ : int = slow.next
snake_case__ : Dict = slow.next
    snake_case__ : List[str] = None  # cut the list in two here; the comparison below still works even if this is skipped
# reverse the second part
snake_case__ : Tuple = None
while second:
snake_case__ : Tuple = second.next
snake_case__ : Any = node
snake_case__ : str = second
snake_case__ : Optional[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
snake_case__ : List[Any] = node.next
snake_case__ : int = head.next
return True
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
snake_case__ : List[Any] = head
while fast and fast.next:
        snake_case__ , snake_case__ = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : str = cur.next
return True
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Union[str, Any] = 0
while head:
if head.val in d:
d[head.val].append(__magic_name__ )
else:
snake_case__ : Tuple = [pos]
snake_case__ : Optional[Any] = head.next
pos += 1
snake_case__ : int = pos - 1
snake_case__ : str = 0
for v in d.values():
if len(__magic_name__ ) % 2 != 0:
middle += 1
else:
snake_case__ : List[str] = 0
for i in range(0 , len(__magic_name__ ) ):
if v[i] + v[len(__magic_name__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
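# The three checks above assume a singly linked list node with `val` and
# `next`; the original file leaves the node type implicit, so here is a
# minimal sketch plus a helper to build test input.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val  # payload compared during the palindrome check
        self.next = next  # link to the following node, or None at the tail
def build_list(values):
    """Build a singly linked list from a Python list (illustrative helper)."""
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head
# e.g. build_list([1, 2, 2, 1]) should be reported as a palindrome by each
# of the three variants above (fast/slow pointer, stack, and dict based).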
| 38 | 0 |
from math import loga
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
if a < 0:
raise ValueError('Input value must be a positive integer' )
    elif isinstance(lowerCamelCase_ , float ):
raise TypeError('Input value must be a \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
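# Illustrative values for the bit trick above (a & -a isolates the lowest set
# bit; log2 of that gives its zero-based index):
#   a = 1  -> 0   # 0b1
#   a = 12 -> 2   # 0b1100, lowest set bit is 0b100
#   a = 0  -> 0   # special-cased before taking the log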
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
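# What shift_tokens_right does for MBart, sketched: the target language code
# sits at the end of `labels` and is rotated to the front to form the decoder
# input (token names are illustrative):
#   labels:             [tok_1, ..., tok_n, </s>, ro_RO]
#   decoder_input_ids:  [ro_RO, tok_1, ..., tok_n, </s>]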
| 38 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase ( __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = (DPMSolverSinglestepScheduler,)
_SCREAMING_SNAKE_CASE : Any = (("num_inference_steps", 2_5),)
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
__snake_case = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float('inf' ),
"""variance_type""": None,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=0 , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('num_inference_steps' , __SCREAMING_SNAKE_CASE )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
__snake_case = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
__snake_case = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
__snake_case = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
__snake_case = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a ( self : str ) -> List[str]:
pass
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=0 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('num_inference_steps' , __SCREAMING_SNAKE_CASE )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
__snake_case = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
__snake_case = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a ( self : str , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]:
if scheduler is None:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
__snake_case = scheduler_class(**__SCREAMING_SNAKE_CASE )
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
__snake_case = scheduler_class(**__SCREAMING_SNAKE_CASE )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def a ( self : Dict ) -> int:
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = 50
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3
def a ( self : Optional[Any] ) -> Dict:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def a ( self : List[str] ) -> Tuple:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
__snake_case = DEISMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config )
__snake_case = UniPCMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__snake_case = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def a ( self : List[Any] ) -> Any:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='dpmsolver++' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def a ( self : Tuple ) -> Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def a ( self : str ) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
__snake_case = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def a ( self : int ) -> Optional[int]:
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
def a ( self : Union[str, Any] ) -> Union[str, Any]:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def a ( self : str ) -> Dict:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
self.check_over_configs(variance_type='learned_range' )
def a ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def a ( self : int ) -> Union[str, Any]:
__snake_case = self.full_loop()
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def a ( self : str ) -> int:
__snake_case = self.full_loop(use_karras_sigmas=__SCREAMING_SNAKE_CASE )
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3
def a ( self : List[str] ) -> List[Any]:
__snake_case = self.full_loop(prediction_type='v_prediction' )
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3
def a ( self : Dict ) -> int:
__snake_case = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=__SCREAMING_SNAKE_CASE )
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3
def a ( self : List[str] ) -> Any:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
__snake_case = scheduler_class(**__SCREAMING_SNAKE_CASE )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
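# The denoising loop exercised by full_loop above, written out plainly (a
# sketch of the diffusers scheduler API as these tests use it):
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)  # predict the noise at step t
#       sample = scheduler.step(residual, t, sample).prev_sample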
| 56 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
        snake_case__ , snake_case__ = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
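# Minimal usage sketch (values follow the defaults above; `BitConfig` is the
# class the obfuscated name stands in for):
#   config = BitConfig(layer_type="preactivation", global_padding="SAME")
#   config.stage_names  # -> ["stem", "stage1", "stage2", "stage3", "stage4"]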
| 38 | 0 |
from manim import *
class _a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_ = Rectangle(height=0.2_5 , width=0.2_5 )
UpperCamelCase_ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = Text('CPU' , font_size=24 )
UpperCamelCase_ = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [mem.copy() for i in range(4 )]
UpperCamelCase_ = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = Text('GPU' , font_size=24 )
UpperCamelCase_ = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = Text('Model' , font_size=24 )
UpperCamelCase_ = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = []
UpperCamelCase_ = []
UpperCamelCase_ = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
rect.set_stroke(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(__SCREAMING_SNAKE_CASE )
model_cpu_arr.append(__SCREAMING_SNAKE_CASE )
self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = Text('Loaded Checkpoint' , font_size=24 )
UpperCamelCase_ = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
checkpoint.move_to([3, 0.5, 0] )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = []
UpperCamelCase_ = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(__SCREAMING_SNAKE_CASE )
ckpt_arr.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__SCREAMING_SNAKE_CASE )
self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
UpperCamelCase_ = [meta_mem.copy() for i in range(6 )]
UpperCamelCase_ = [meta_mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase_ = Text('Disk' , font_size=24 )
UpperCamelCase_ = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) , Write(__SCREAMING_SNAKE_CASE , run_time=1 ) , Create(__SCREAMING_SNAKE_CASE , run_time=1 ) )
UpperCamelCase_ = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*__SCREAMING_SNAKE_CASE )
self.play(FadeOut(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) )
self.play(
FadeOut(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) , )
self.wait()
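# Hedged usage note: a scene like the one above is typically rendered from the
# shell with the manim community-edition CLI, e.g.
#   manim -pql this_file.py _a
# where `this_file.py` is an assumed file name and `_a` is the scene class above.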
| 23 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
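# Hedged note on the mapping above: each tuple pairs a timm parameter name with
# its HuggingFace counterpart; `rename_key` further below then pops the old key
# from the state dict and re-inserts the tensor under the new name.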
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : int = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Optional[int] = in_proj_bias[-config.hidden_size :]
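# Hedged illustration of the slicing above: timm fuses query/key/value into one
# (3 * hidden_size, hidden_size) matrix; rows [0:h], [h:2h], and [2h:3h] are the
# separate Q, K, and V projections expected by the HuggingFace state dict.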
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = dct.pop(__magic_name__ )
snake_case__ : Dict = val
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=__magic_name__ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=__magic_name__ , image_size=3_84 , num_labels=10_00 )
snake_case__ : Union[str, Any] = False
# load original model from timm
snake_case__ : List[Any] = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
snake_case__ : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : str = """huggingface/label-files"""
snake_case__ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(__magic_name__ ).eval()
else:
snake_case__ : Union[str, Any] = ViTHybridForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# create image processor
snake_case__ : Optional[Any] = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
snake_case__ : Union[str, Any] = transform.transforms
snake_case__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : Any = ViTHybridImageProcessor(
do_resize=__magic_name__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : int = transform(__magic_name__ ).unsqueeze(0 )
snake_case__ : List[str] = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ )
snake_case__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Dict = timm_model.forward_features(__magic_name__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__magic_name__ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : int = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
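# Hedged usage sketch (hypothetical paths): the converter is meant to be run as
# a script, for example:
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit_hybrid_out
# Only the `--vit_name` default comes from the argparse setup above; the file
# name and output path are assumptions.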
| 38 | 0 |
import math
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float:
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__SCREAMING_SNAKE_CASE ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
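# Worked example appended for illustration: by Malus's law, I = I0 * cos^2(theta),
# so at 60 degrees the transmitted intensity is 100 * cos^2(60 deg) = 100 * 0.25
# = 25.0. (`_lowercase` is the obfuscated local name of `malus_law` above.)
assert math.isclose(100.0 * math.cos(math.radians(60)) ** 2, 25.0)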
| 410 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
        snake_case__ : int = torch.nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
        snake_case__ : Optional[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
        snake_case__ : int = nn.Conv2d(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
        snake_case__ : Optional[Any] = nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
        snake_case__ : Tuple = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
        snake_case__ : Union[str, Any] = nn.Conv2d(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
# make sure sample is on the same device as the parameters and has same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
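# Hedged usage sketch: the final class above (DiagonalGaussianDistribution
# upstream) splits its input into mean and logvar halves along dim=1, samples
# via the reparameterization trick, and has a closed-form KL against a standard
# normal. Method names here are obfuscated to `__UpperCamelCase`; upstream they
# are `sample`, `kl`, `nll`, and `mode`, so the calls below are illustrative:
# params = torch.randn(1, 8, 4, 4)   # 8 channels -> 4 mean + 4 logvar
# dist = DiagonalGaussianDistribution(params)
# z = dist.sample()                  # same shape as the mean: (1, 4, 4, 4)
# kl = dist.kl()                     # 0.5 * sum(mean^2 + var - 1 - logvar)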
| 38 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A_ ( __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE = """perceiver"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=2_56 , __SCREAMING_SNAKE_CASE : Dict=12_80 , __SCREAMING_SNAKE_CASE : Union[str, Any]=7_68 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : str=26 , __SCREAMING_SNAKE_CASE : Dict=8 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]="kv" , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : Dict=1 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : str=1E-12 , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=2_62 , __SCREAMING_SNAKE_CASE : Union[str, Any]=20_48 , __SCREAMING_SNAKE_CASE : Any=56 , __SCREAMING_SNAKE_CASE : List[Any]=[3_68, 4_96] , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : str=19_20 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 16, 2_24, 2_24] , **__SCREAMING_SNAKE_CASE : str , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
__a = num_latents
__a = d_latents
__a = d_model
__a = num_blocks
__a = num_self_attends_per_block
__a = num_self_attention_heads
__a = num_cross_attention_heads
__a = qk_channels
__a = v_channels
__a = cross_attention_shape_for_attention
__a = self_attention_widening_factor
__a = cross_attention_widening_factor
__a = hidden_act
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = use_query_residual
# masked language modeling attributes
__a = vocab_size
__a = max_position_embeddings
# image classification attributes
__a = image_size
# flow attributes
__a = train_size
# multimodal autoencoding attributes
__a = num_frames
__a = audio_samples_per_frame
__a = samples_per_patch
__a = output_shape
class A_ ( __SCREAMING_SNAKE_CASE ):
@property
def _UpperCAmelCase ( self : Optional[int] ):
if self.task == "multiple-choice":
__a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__a = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def _UpperCAmelCase ( self : List[str] ):
return 1E-4
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : List[Any] = -1 , __SCREAMING_SNAKE_CASE : int = False , __SCREAMING_SNAKE_CASE : Tuple = None , __SCREAMING_SNAKE_CASE : Union[str, Any] = 3 , __SCREAMING_SNAKE_CASE : Optional[Any] = 40 , __SCREAMING_SNAKE_CASE : Tuple = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = preprocessor.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
__a = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
__a = [""" """.join(["a"] ) * seq_length] * batch_size
__a = dict(preprocessor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
__a = inputs.pop("input_ids" )
return inputs
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch )
__a = self._generate_dummy_images(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = dict(preprocessor(images=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
__a = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 197 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
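# Hedged usage note: these tests are normally collected by pytest, e.g.
#   python -m pytest tests/models/dpt/test_modeling_dpt.py -k "depth_estimation"
# (the file path follows the usual transformers repository layout and is an assumption).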
| 38 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCamelCase (lowerCAmelCase : Any, lowerCAmelCase : Any ) -> str:
assert isinstance(lowerCAmelCase, lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def __UpperCamelCase (lowerCAmelCase : Optional[Any], lowerCAmelCase : Tuple, lowerCAmelCase : Optional[int] ) -> str:
A = tmp_path / """cache"""
A = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A = TextDatasetReader(lowerCAmelCase, cache_dir=lowerCAmelCase, keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase, lowerCAmelCase )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def __UpperCamelCase (lowerCAmelCase : Any, lowerCAmelCase : Any, lowerCAmelCase : Dict ) -> Optional[int]:
A = tmp_path / """cache"""
A = {"""text""": """string"""}
A = features.copy() if features else default_expected_features
A = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = TextDatasetReader(lowerCAmelCase, features=lowerCAmelCase, cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase, lowerCAmelCase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCamelCase (lowerCAmelCase : Any, lowerCAmelCase : Optional[int], lowerCAmelCase : Union[str, Any] ) -> List[str]:
A = tmp_path / """cache"""
A = {"""text""": """string"""}
A = TextDatasetReader(lowerCAmelCase, cache_dir=lowerCAmelCase, split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase, lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : int, lowerCAmelCase : Optional[int] ) -> Optional[int]:
if issubclass(lowerCAmelCase, lowerCAmelCase ):
A = text_path
elif issubclass(lowerCAmelCase, lowerCAmelCase ):
A = [text_path]
A = tmp_path / """cache"""
A = {"""text""": """string"""}
A = TextDatasetReader(lowerCAmelCase, cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : List[Any], lowerCAmelCase : List[str], lowerCAmelCase : Dict=("train",) ) -> Union[str, Any]:
assert isinstance(lowerCAmelCase, lowerCAmelCase )
for split in splits:
A = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : List[str], lowerCAmelCase : Any ) -> List[Any]:
A = tmp_path / """cache"""
A = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A = TextDatasetReader({'train': text_path}, cache_dir=lowerCAmelCase, keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase, lowerCAmelCase )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def __UpperCamelCase (lowerCAmelCase : Union[str, Any], lowerCAmelCase : Any, lowerCAmelCase : Optional[Any] ) -> List[str]:
A = tmp_path / """cache"""
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
A = {"""text""": """string"""}
A = features.copy() if features else default_expected_features
A = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = TextDatasetReader({'train': text_path}, features=lowerCAmelCase, cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase, lowerCAmelCase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCamelCase (lowerCAmelCase : Dict, lowerCAmelCase : Any, lowerCAmelCase : Dict ) -> Union[str, Any]:
if split:
A = {split: text_path}
else:
A = """train"""
A = {"""train""": text_path, """test""": text_path}
A = tmp_path / """cache"""
A = {"""text""": """string"""}
A = TextDatasetReader(lowerCAmelCase, cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase, lowerCAmelCase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
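# Hedged usage sketch outside pytest: TextDatasetReader loads one line of the
# input file per row into a single "text" column.
# ds = TextDatasetReader("data.txt", cache_dir="./cache").read()
# assert ds.column_names == ["text"]
# ("data.txt" and "./cache" are hypothetical paths.)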
| 699 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3 # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
    snake_case__ : int = boto3.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def _get_iam_role_arn( role_name: str ) -> str:
    '''simple docstring'''
    iam_client = boto3.client("iam" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input() -> SageMakerConfig:
    '''simple docstring'''
    credentials_configuration = _ask_options(
        "How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , int , )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
        aws_access_key_id = _ask_field("AWS Access Key ID: " )
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: " )
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , int , )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: " )
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f"Accelerate will create an IAM role \"{iam_role_name}\" using the provided credentials" )
        _create_iam_role_for_sagemaker(iam_role_name )
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: " , lambda x: str(x ).lower() )
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda x: str(x ).lower() , )
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda x: str(x ).lower() , )
    distributed_type = _ask_options(
        "What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?" , TORCH_DYNAMO_MODES , lambda x: TORCH_DYNAMO_MODES[int(x )] , default="default" , )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want to use fullgraph mode, or is it OK to break the model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
    ec2_instance_query = "Which EC2 instance type do you want to use for your training"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query + "?" , SAGEMAKER_PARALLEL_EC2_INSTANCES , lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x )] )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query , lambda x: str(x ).lower() , default="ml.p3.2xlarge" )
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want to use? [1]: " , int , default=1 , )
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
    return SageMakerConfig(
        image_uri=docker_image , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=distributed_type , use_cpu=False , dynamo_config=dynamo_config , ec2_instance_type=ec2_instance_type , profile=aws_profile , region=aws_region , iam_role_name=iam_role_name , mixed_precision=mixed_precision , num_machines=num_machines , sagemaker_inputs_file=sagemaker_inputs_file , sagemaker_metrics_file=sagemaker_metrics_file , )
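# For reference, a non-interactive sketch of the SageMakerConfig assembled by
# the prompts above. Field names mirror the return statement; the exact
# dataclass signature varies across accelerate versions, and every value below
# is an illustrative assumption (a real setup needs a valid IAM role/region).
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils.dataclasses import ComputeEnvironment, SageMakerDistributedType

example_config = SageMakerConfig(
    image_uri=None,
    compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
    distributed_type=SageMakerDistributedType.NO,
    use_cpu=False,
    dynamo_config={},
    ec2_instance_type="ml.p3.2xlarge",
    profile="default",
    region="us-east-1",
    iam_role_name="accelerate_sagemaker_execution_role",
    mixed_precision="no",
    num_machines=1,
    sagemaker_inputs_file=None,
    sagemaker_metrics_file=None,
)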
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    # attribute name assumed from the upstream diffusers test suite
    test_xformers_attention_forwardGenerator_pass = False
@property
    def text_embedder_hidden_size( self ):
'''simple docstring'''
return 32
@property
    def time_input_dim( self ):
'''simple docstring'''
return 32
@property
    def block_out_channels_0( self ):
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
'''simple docstring'''
return 100
@property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        # create mask: zero out a square region to mark it for inpainting
        # (region assumed from the upstream test)
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}" )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        # mask out a band at the top of the image (region assumed from the upstream test)
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product: str = "laptop" ) -> DataFrame:
    '''simple docstring'''
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text , "html.parser" )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
            # append the scraped fields as a new row
            data_frame.loc[len(data_frame )] = [
                product_title,
                product_link,
                product_price,
                product_rating,
                product_mrp,
                discount,
            ]
        except AttributeError:
            # skip results that are missing a title or price block
            continue
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
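# Quick usage sketch beyond the CSV export above (network access required;
# Amazon's markup changes often, so the selectors are best-effort and may
# yield empty frames):
#
#     df = get_amazon_product_data("wireless mouse")
#     print(df[["Product Title", "Discount"]].head())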
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
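# Minimal illustration of the lazy-import pattern above (simplified; the real
# _LazyModule also handles __dir__, pickling, and the module spec):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, name):
        # import the defining submodule only on first attribute access
        module = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(module, name)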
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def longformer_dict_integration_testing( self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=False ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=False ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
    @slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_2 = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ):
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs( self ):
        pass
    def test_embeded_special_tokens( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    tokens_r_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    def test_change_add_prefix_space_and_trim_offsets_args( self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , add_prefix_space )
            self.assertEqual(post_processor_state["add_prefix_space"] , add_prefix_space )
            self.assertEqual(post_processor_state["trim_offsets"] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
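# A hedged sketch of the offset behaviour asserted above (requires downloading
# the checkpoint; exact spans follow the assertions, e.g. with
# add_prefix_space=True and trim_offsets=True the encoded leading space is
# excluded from each token's character span):
from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained(
    "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # [(0, 5), (6, 11)]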
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any ={
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig ):
"""simple docstring"""
    model_type = "camembert"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig ):
"""simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
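# Usage sketch: the defaults above reproduce the base architecture, and the
# ONNX config derives its dynamic axes from the configured task.
from transformers import CamembertConfig

config = CamembertConfig()
print(config.model_type, config.hidden_size)  # camembert 768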
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig(OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-3
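# Quick sanity-check sketch for the config above (no downloads; the model is
# randomly initialized, and the small sizes are arbitrary smoke-test values):
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig(depths=[2, 2], hidden_sizes=[32, 64], embedding_size=16)
model = ResNetModel(config)
print(config.stage_names)  # ['stem', 'stage1', 'stage2']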
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
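# The TYPE_CHECKING branch above gives static analyzers real import targets
# while runtime imports stay lazy. A sketch of the effect in user code:
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from transformers import BertModel  # seen only by type checkers, never executed

def load_bert(name: str) -> "BertModel":
    from transformers import BertModel  # resolved lazily at call time
    return BertModel.from_pretrained(name)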
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` keeps working.
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
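# What the warning above means for callers, per its own message (assuming this
# shim is importable as `diffusers.pipeline_utils`):
#
#     from diffusers.pipeline_utils import DiffusionPipeline            # warns
#     from diffusers.pipelines.pipeline_utils import DiffusionPipeline  # preferred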
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream ):
    def __init__( self , sql , con , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , **kwargs , ):
        """simple docstring"""
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
    def read( self ):
        """simple docstring"""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    def __init__( self , dataset: Dataset , name: str , con , batch_size: Optional[int] = None , num_proc: Optional[int] = None , **to_sql_kwargs , ):
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write( self ) -> int:
        """simple docstring"""
        _ = self.to_sql_kwargs.pop('''sql''' , None )
        _ = self.to_sql_kwargs.pop('''con''' , None )
        index = self.to_sql_kwargs.pop('''index''' , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written
    def _batch_sql( self , args ):
        """simple docstring"""
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
    def _write( self , index , **to_sql_kwargs ) -> int:
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                    written += num_rows
        return written
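# Round-trip sketch using the public wrappers over the classes above
# (Dataset.to_sql / Dataset.from_sql); con may also be given as a
# "sqlite:///file.db" URI, which caches more predictably than a connection:
import sqlite3
from datasets import Dataset

con = sqlite3.connect(":memory:")
Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]}).to_sql("samples", con)
ds = Dataset.from_sql("SELECT text, label FROM samples", con)
print(ds[0])  # {'text': 'a', 'label': 0}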
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback ):
    '''A callback that registers the events that go through.'''

    def __init__( self ):
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append("on_init_end" )
    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append("on_train_begin" )
    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append("on_train_end" )
    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append("on_epoch_begin" )
    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append("on_epoch_end" )
    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append("on_step_begin" )
    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append("on_step_end" )
    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append("on_evaluate" )
    def on_predict( self , args , state , control , **kwargs ):
        self.events.append("on_predict" )
    def on_save( self , args , state , control , **kwargs ):
        self.events.append("on_save" )
    def on_log( self , args , state , control , **kwargs ):
        self.events.append("on_log" )
    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append("on_prediction_step" )
@require_torch
class TrainerCallbackTest(unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append("on_epoch_begin" )
            for _ in range(train_dl_len ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log" )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save" )
            expected_events.append("on_epoch_end" )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback( self ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 , cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
def __UpperCamelCase ( self ):
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
        warnings.simplefilter(action="""ignore""" , category=UserWarning )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
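# --- Added sketch (not part of the original test file) ---
# A minimal TrainerCallback that records the events it receives, using the same
# public hooks the tests above exercise; the class name is illustrative.
from transformers import TrainerCallback
class ExampleEventRecorder(TrainerCallback):
    """Collects the name of every event this callback is notified about."""
    def __init__(self):
        self.events = []
    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")
    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")
    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")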
| 38 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Dict = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
__a : Union[str, Any] = {
"""input_ids""": tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa ), # "My dog is cute"
"""attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
__a : Optional[Any] = model(__SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
__a : Optional[Any] = tf.TensorShape((1, 6, 7_6_8) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
__a : str = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 47 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
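# --- Added sketch (not part of the original tests) ---
# Preprocessing a single PIL image with the processor under test; the size dict
# mirrors the tester defaults above, and the helper name is illustrative.
def _example_dpt_preprocess():
    processor = DPTImageProcessor(size={"height": 1_8, "width": 1_8})
    image = Image.new("RGB", (3_0, 3_0))
    # Returns a (1, 3, 18, 18) float tensor after resizing and normalization.
    return processor(images=image, return_tensors="pt").pixel_values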
| 38 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
def a ( self : Dict ) -> Optional[Any]:
        # For consistency across the different places where the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
__snake_case = [[1, 2, 4], [1, 2, 3, 4]]
__snake_case = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a ( self : Any ) -> Any:
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2], which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        # A runnable sketch of the update cycle follows after this class.
__snake_case = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def a ( self : Optional[int] ) -> Optional[Any]:
__snake_case = [[1, 2, 3], [1, 2, 4]]
__snake_case = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__snake_case = dc.update(1 )
__snake_case = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__snake_case = dc.update(2 )
__snake_case = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__snake_case = dc.update(3 )
__snake_case = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__snake_case = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__snake_case = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__snake_case = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__snake_case = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__snake_case = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__snake_case = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__snake_case = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__snake_case = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
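# --- Added sketch (not part of the original test file) ---
# A minimal, runnable walk-through of the update cycle exercised above.
# DisjunctiveConstraint is the real transformers class; the token ids are toy values.
def _example_disjunctive_walkthrough():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    # After consuming [1, 2, 4] the second branch is fully matched.
    assert completed and dc.current_seq == [1, 2, 4]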
| 56 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
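# --- Added sketch (not part of the original tests) ---
# The same inference path as the integration test above, written as a plain
# helper; the checkpoint is the first entry of the archive list imported above.
def _example_cvt_inference():
    processor = AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    inputs = processor(images=prepare_img(), return_tensors="tf")
    # Logits over the 1000 ImageNet classes, shape (1, 1000).
    return model(**inputs).logits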
| 38 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _snake_case (__lowercase , __lowercase=None):
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
UpperCamelCase_ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
UpperCamelCase_ = requests.get(__lowercase , headers=__lowercase).json()
UpperCamelCase_ = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']})
UpperCamelCase_ = math.ceil((result['total_count'] - 100) / 100)
for i in range(__lowercase):
UpperCamelCase_ = requests.get(url + f"""&page={i + 2}""" , headers=__lowercase).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']})
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")
return {}
def _snake_case (__lowercase , __lowercase=None):
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
UpperCamelCase_ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
UpperCamelCase_ = requests.get(__lowercase , headers=__lowercase).json()
UpperCamelCase_ = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
UpperCamelCase_ = math.ceil((result['total_count'] - 100) / 100)
for i in range(__lowercase):
UpperCamelCase_ = requests.get(url + f"""&page={i + 2}""" , headers=__lowercase).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")
return {}
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase):
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
    UpperCamelCase_ = requests.get(__lowercase , headers=__lowercase , allow_redirects=False)
    UpperCamelCase_ = result.headers["""Location"""]
    UpperCamelCase_ = requests.get(__lowercase , allow_redirects=True)
UpperCamelCase_ = os.path.join(__lowercase , f"""{artifact_name}.zip""")
with open(__lowercase , 'wb') as fp:
fp.write(response.content)
def _snake_case (__lowercase , __lowercase=None):
UpperCamelCase_ = []
UpperCamelCase_ = []
UpperCamelCase_ = None
with zipfile.ZipFile(__lowercase) as z:
for filename in z.namelist():
if not os.path.isdir(__lowercase):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowercase) as f:
for line in f:
UpperCamelCase_ = line.decode('UTF-8').strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCamelCase_ = line[: line.index(': ')]
UpperCamelCase_ = line[line.index(': ') + len(': ') :]
errors.append([error_line, error])
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED '):
# `test` is the test method that failed
UpperCamelCase_ = line[len('FAILED ') :]
failed_tests.append(__lowercase)
elif filename == "job_name.txt":
UpperCamelCase_ = line
if len(__lowercase) != len(__lowercase):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(__lowercase)} for `errors` """
f"""and {len(__lowercase)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
' problem.')
UpperCamelCase_ = None
if job_name and job_links:
UpperCamelCase_ = job_links.get(__lowercase , __lowercase)
# A list with elements of the form (line of error, error, failed test)
UpperCamelCase_ = [x + [y] + [job_link] for x, y in zip(__lowercase , __lowercase)]
return result
def _snake_case (__lowercase , __lowercase=None):
UpperCamelCase_ = []
UpperCamelCase_ = [os.path.join(__lowercase , __lowercase) for p in os.listdir(__lowercase) if p.endswith('.zip')]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowercase , job_links=__lowercase))
return errors
def _snake_case (__lowercase , __lowercase=None):
UpperCamelCase_ = Counter()
counter.update([x[1] for x in logs])
UpperCamelCase_ = counter.most_common()
UpperCamelCase_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCamelCase_ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    UpperCamelCase_ = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
return r
def _snake_case (__lowercase):
UpperCamelCase_ = test.split('::')[0]
if test.startswith('tests/models/'):
UpperCamelCase_ = test.split('/')[2]
else:
UpperCamelCase_ = None
return test
def _snake_case (__lowercase , __lowercase=None):
UpperCamelCase_ = [(x[0], x[1], get_model(x[2])) for x in logs]
UpperCamelCase_ = [x for x in logs if x[2] is not None]
UpperCamelCase_ = {x[2] for x in logs}
UpperCamelCase_ = {}
for test in tests:
UpperCamelCase_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test])
UpperCamelCase_ = counter.most_common()
UpperCamelCase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCamelCase_ = sum(error_counts.values())
if n_errors > 0:
UpperCamelCase_ = {"""count""": n_errors, """errors""": error_counts}
    UpperCamelCase_ = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
return r
def _snake_case (__lowercase):
UpperCamelCase_ = """| no. | error | status |"""
UpperCamelCase_ = """|-:|:-|:-|"""
UpperCamelCase_ = [header, sep]
for error in reduced_by_error:
UpperCamelCase_ = reduced_by_error[error]["""count"""]
UpperCamelCase_ = f"""| {count} | {error[:100]} | |"""
lines.append(__lowercase)
return "\n".join(__lowercase)
def _snake_case (__lowercase):
UpperCamelCase_ = """| model | no. of errors | major error | count |"""
UpperCamelCase_ = """|-:|-:|-:|-:|"""
UpperCamelCase_ = [header, sep]
for model in reduced_by_model:
UpperCamelCase_ = reduced_by_model[model]["""count"""]
UpperCamelCase_ = list(reduced_by_model[model]['errors'].items())[0]
UpperCamelCase_ = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(__lowercase)
return "\n".join(__lowercase)
if __name__ == "__main__":
snake_case__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
snake_case__ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
snake_case__ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
snake_case__ : Optional[Any] = {}
    # To deal with the `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
            # This is how GitHub Actions combines job names.
if " / " in k:
snake_case__ : int = k.find(""" / """)
snake_case__ : List[Any] = k[index + len(""" / """) :]
snake_case__ : List[str] = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
snake_case__ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
snake_case__ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
snake_case__ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
snake_case__ : Any = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
snake_case__ : Any = reduce_by_error(errors)
snake_case__ : Union[str, Any] = reduce_by_model(errors)
snake_case__ : Any = make_github_table(reduced_by_error)
snake_case__ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
| 23 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        # For consistency across the different places where the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2], which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 38 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase__ : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[str] = -1
UpperCamelCase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Tuple = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
UpperCamelCase__ : List[str] = TextStreamer(__SCREAMING_SNAKE_CASE)
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ : List[str] = cs.out[:-1]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( self) -> Tuple:
UpperCamelCase__ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Tuple = -1
UpperCamelCase__ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any = tokenizer.decode(greedy_ids[0])
UpperCamelCase__ : Tuple = TextIteratorStreamer(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase__ : Tuple = Thread(target=model.generate , kwargs=__SCREAMING_SNAKE_CASE)
thread.start()
UpperCamelCase__ : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( self) -> List[str]:
UpperCamelCase__ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : List[str] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[Any] = -1
UpperCamelCase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCamelCase__ : Tuple = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
UpperCamelCase__ : Any = TextStreamer(__SCREAMING_SNAKE_CASE , skip_prompt=__SCREAMING_SNAKE_CASE)
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ : Any = cs.out[:-1]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( self) -> List[str]:
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them (a minimal streaming sketch follows after this class).
UpperCamelCase__ : str = AutoTokenizer.from_pretrained('distilgpt2')
UpperCamelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained('distilgpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any = -1
UpperCamelCase__ : Union[str, Any] = torch.ones((1, 5) , device=__SCREAMING_SNAKE_CASE).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCamelCase__ : Optional[int] = TextStreamer(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE)
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCamelCase__ : Tuple = cs.out[:-1] # Remove the final "\n"
UpperCamelCase__ : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt')
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def lowerCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : Dict = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[Any] = -1
UpperCamelCase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str = TextIteratorStreamer(__SCREAMING_SNAKE_CASE , timeout=0.001)
UpperCamelCase__ : Dict = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase__ : List[str] = Thread(target=model.generate , kwargs=__SCREAMING_SNAKE_CASE)
thread.start()
        # The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(__SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Optional[Any] = """"""
for new_text in streamer:
streamer_text += new_text
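# --- Added sketch (not part of the original tests) ---
# Minimal threaded streaming with TextIteratorStreamer, mirroring the tests
# above; `skip_special_tokens` is forwarded to `tokenizer.decode` by the streamer.
def _example_stream(model, tokenizer, input_ids):
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    text = "".join(streamer)  # blocks until generation signals the end of the stream
    thread.join()
    return text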
| 410 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
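# --- Added sketch (not part of the original module) ---
# Constructing the config defined above via its public name in transformers;
# `SegformerConfig` is assumed to be the upstream alias of this class.
def _example_segformer_config():
    from transformers import SegformerConfig
    config = SegformerConfig(num_encoder_blocks=4, hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6])
    return config.model_type  # "segformer"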
| 38 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
@property
def _UpperCAmelCase ( self : str ):
torch.manual_seed(0 )
        __a = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _UpperCAmelCase ( self : List[str] ):
__a = self.dummy_uncond_unet
__a = PNDMScheduler()
__a = PNDMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
pndm.to(__SCREAMING_SNAKE_CASE )
pndm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__a = torch.manual_seed(0 )
__a = pndm(generator=__SCREAMING_SNAKE_CASE , num_inference_steps=20 , output_type="numpy" ).images
__a = torch.manual_seed(0 )
__a = pndm(generator=__SCREAMING_SNAKE_CASE , num_inference_steps=20 , output_type="numpy" , return_dict=__SCREAMING_SNAKE_CASE )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Optional[int] ):
__a = """google/ddpm-cifar10-32"""
        __a = UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__a = PNDMScheduler()
__a = PNDMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
pndm.to(__SCREAMING_SNAKE_CASE )
pndm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__a = torch.manual_seed(0 )
__a = pndm(generator=__SCREAMING_SNAKE_CASE , output_type="numpy" ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
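# --- Added sketch (not part of the original tests) ---
# End-to-end use of the components exercised above; the checkpoint name matches
# the slow test and stands in for any compatible UNet2DModel weights.
def _example_pndm_sampling():
    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
    pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
    generator = torch.manual_seed(0)
    # Returns a (1, 32, 32, 3) numpy array with values in [0, 1].
    return pipe(generator=generator, num_inference_steps=20, output_type="numpy").images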
| 197 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
    snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=False )
    snake_case__ : Any = result.headers["""Location"""]
    snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=True )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
    # To deal with the `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
            # This is how GitHub Actions combines job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 38 | 0 |
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    # Slack incoming webhooks expect a JSON payload; the "text" field carries the message.
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
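# Equivalent manual check from a shell (this mirrors Slack's documented webhook example;
# the hooks.slack.com path below is a placeholder, not a real endpoint):
#
#   curl -X POST -H 'Content-type: application/json' \
#        --data '{"text": "Hello, World!"}' \
#        https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX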
| 699 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
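# Hedged usage sketch: this class mirrors `datasets`' MockDownloadManager, so the call
# below assumes the upstream method name `download_and_extract` (every def above was
# mangled to the same identifier). Init args are positional: dataset_name, config,
# version, then cache_dir; the dataset name, version, and URL are hypothetical.
#
#   manager = MockDownloadManager("my_dataset", None, "1.0.0", ".cache")
#   paths = manager.download_and_extract({"train": "https://example.com/train.csv"})
#   # -> {"train": "<dummy_data_root>/train.csv"}  (file names pass through quote_plus)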
| 38 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _A :
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Any=13 , lowerCamelCase : Optional[int]=7 , lowerCamelCase : List[str]=True , lowerCamelCase : List[str]=True , lowerCamelCase : Dict=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : int=99 , lowerCamelCase : Optional[int]=64 , lowerCamelCase : str=32 , lowerCamelCase : Optional[Any]=5 , lowerCamelCase : List[str]=4 , lowerCamelCase : List[str]=37 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : List[str]=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : str=512 , lowerCamelCase : Tuple=16 , lowerCamelCase : Tuple=2 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : Any=3 , lowerCamelCase : List[Any]=4 , lowerCamelCase : Optional[int]=None , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = embedding_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : str ):
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def _snake_case ( self : List[str] , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = MegatronBertModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__lowercase = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__lowercase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self : int , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = MegatronBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Any , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = MegatronBertForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = MegatronBertForNextSentencePrediction(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self : Tuple , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = MegatronBertForPreTraining(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , next_sentence_label=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowercase = MegatronBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : List[str] ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MegatronBertForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Dict ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MegatronBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = self.num_choices
__lowercase = MegatronBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_snake_case : Tuple = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_snake_case : int = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case : int = True
# test_resize_embeddings = False
_snake_case : Dict = False
def _snake_case ( self : int , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Any=False ):
'''simple docstring'''
__lowercase = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
return inputs_dict
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = MegatronBertModelTester(self )
__lowercase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _snake_case ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
return torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
snake_case__ : str = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("Model is not available." )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
__lowercase = os.path.join(os.environ["MYDIR"] , __SCREAMING_SNAKE_CASE )
__lowercase = MegatronBertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.half()
__lowercase = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
__lowercase = model(__SCREAMING_SNAKE_CASE )[0]
__lowercase = torch.Size((1, 9, 1_024) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__lowercase = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
__lowercase = output[0, ii, jj]
__lowercase = expected[3 * ii + jj]
__lowercase = """ii={} jj={} a={} b={}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertTrue(math.isclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rel_tol=__SCREAMING_SNAKE_CASE , abs_tol=__SCREAMING_SNAKE_CASE ) , msg=__SCREAMING_SNAKE_CASE )
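# Note on the elementwise check above: per the Python docs, math.isclose(a, b, rel_tol=t,
# abs_tol=t) accepts a pair iff abs(a - b) <= max(t * max(abs(a), abs(b)), t). With the
# 1e-4 tolerance defined above, for example:
#
#   import math
#   math.isclose(-0.6040, -0.60405, rel_tol=1e-4, abs_tol=1e-4)   # True  (diff 5e-5)
#   math.isclose(-0.6040, -0.6060, rel_tol=1e-4, abs_tol=1e-4)    # False (diff 2e-3)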
| 402 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
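# Side note on the seed handling in get_dummy_inputs above: to my understanding, older
# torch builds do not support torch.Generator(device="mps"), hence the branch on the
# device string. A minimal sketch of the same pattern:
#
#   device = "cpu"  # placeholder
#   if str(device).startswith("mps"):
#       generator = torch.manual_seed(0)   # global-RNG fallback on MPS
#   else:
#       generator = torch.Generator(device=device).manual_seed(0)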
| 38 | 0 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A vertex may take `color` only if no already-colored neighbour uses that color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking step: try each color for vertex `index`, recurse, undo on failure."""
    # Base Case: all vertices have been assigned a color
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
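# Usage sketch (the 5-vertex adjacency matrix below is a made-up example): with 3 colors
# the backtracking search finds an assignment; with 1 color it fails and returns [].
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(graph, 3))  # [0, 1, 0, 1, 0]
    print(color(graph, 1))  # []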
| 393 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
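# Illustration of the index-to-enum converters above (the option lists are hard-coded in
# each converter; the function names below are the upstream accelerate ones that the
# mangled defs above appear to correspond to, so treat them as assumptions):
#
#   _convert_compute_environment("0")   # -> ComputeEnvironment.LOCAL_MACHINE
#   _convert_distributed_mode("3")      # -> DistributedType.MULTI_GPU
#   _convert_dynamo_backend("2")        # -> "INDUCTOR" (DynamoBackend value)
#   _convert_yes_no_to_bool("Yes")      # -> True (case-insensitive)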
| 38 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ : List[str] =logging.get_logger(__name__)
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__snake_case = ["""input_values""", """padding_mask"""]
def __init__( self , _lowercase = 1 , _lowercase = 24000 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , **_lowercase , ) -> Dict:
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = chunk_length_s
_lowerCamelCase : Union[str, Any] = overlap
@property
def a__ ( self ) -> str:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__ ( self ) -> Optional[Any]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , _lowercase , _lowercase = None , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , ) -> Optional[Any]:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
_lowerCamelCase : Tuple = True
_lowerCamelCase : Dict = bool(
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
_lowerCamelCase : Optional[Any] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
_lowerCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Dict = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : int = [np.asarray(__SCREAMING_SNAKE_CASE ).T]
# verify inputs are valid
for idx, example in enumerate(__SCREAMING_SNAKE_CASE ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : Union[str, Any] = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_lowerCamelCase : str = min(array.shape[0] for array in raw_audio )
_lowerCamelCase : Optional[int] = int(np.floor(max_length / self.chunk_stride ) )
_lowerCamelCase : Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_lowerCamelCase : Tuple = max(array.shape[0] for array in raw_audio )
_lowerCamelCase : Any = int(np.ceil(max_length / self.chunk_stride ) )
_lowerCamelCase : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
_lowerCamelCase : int = """max_length"""
else:
_lowerCamelCase : List[Any] = input_values
# normal padding on batch
if padded_inputs is None:
_lowerCamelCase : Dict = self.pad(
__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
if padding:
_lowerCamelCase : Dict = padded_inputs.pop('''attention_mask''' )
_lowerCamelCase : Optional[int] = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
_lowerCamelCase : Any = example[..., None]
input_values.append(example.T )
_lowerCamelCase : Any = input_values
if return_tensors is not None:
_lowerCamelCase : List[str] = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
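# Hedged usage sketch: this class mirrors EnCodec's feature extractor; the clip is
# synthetic, and the positional-call form follows the __call__ signature above
# (raw_audio, padding, truncation, max_length, return_tensors, sampling_rate).
#
#   import numpy as np
#   extractor = _UpperCAmelCase()                # mono, 24 kHz defaults
#   clip = np.zeros(24000, dtype=np.float32)     # one second of silence
#   feats = extractor([clip], True, False, None, "np", 24000)
#   feats["input_values"][0].shape               # -> (1, 24000), channel-first mono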
| 434 |
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 38 | 0 |