from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Import structure consumed by _LazyModule: maps submodule name -> public names.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

# The modeling module is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
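# The file above is the standard transformers lazy-import pattern: at runtime the
# module swaps itself in sys.modules for a _LazyModule proxy, so submodules in
# _import_structure are only imported when one of their names is first accessed.
# Below is a minimal, self-contained sketch of the same idea (generic names and a
# simplified proxy, not the actual transformers implementation):

import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Import submodules lazily, on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only called for attributes not found normally, i.e. the lazy ones
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)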
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64), class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with plain text prompts
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward with precomputed embeddings
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with plain text prompts
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward with precomputed embeddings
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
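# Hedged usage note (the test path is an assumption; diffusers has reorganized
# its test tree across releases): the fast tests above run on CPU with tiny
# randomly initialized components, while the @slow class downloads the real
# cvssp/audioldm checkpoint and is skipped unless slow tests are enabled, e.g.
#
#     pytest tests/pipelines/audioldm/test_audioldm.py
#     RUN_SLOW=1 pytest tests/pipelines/audioldm/test_audioldm.py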
# ---------------------------------------------------------------------------
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    # show_min returns the distance rather than printing it, so print the result
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
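# Worked check for the demo above, derived by hand from the listed edges: the
# cheapest 1 -> 4 route is 1 -> 3 -> 4 (5 + 6 = 11) and the cheapest 0 -> 3
# route is 0 -> 2 -> 3 (9 + 7 = 16), so the two print calls output 11 and 16.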
# ---------------------------------------------------------------------------
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
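# Hedged invocation sketch (the script and file names are hypothetical
# placeholders, not paths shipped with the converter):
#
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert/pytorch_model.bin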
# ---------------------------------------------------------------------------
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` with the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
# ---------------------------------------------------------------------------
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
# ---------------------------------------------------------------------------
from math import sqrt


def solution(limit: int = 1000000) -> int:
    # Count cuboids whose shortest surface path is an integer, growing the
    # maximum side length until the count exceeds `limit` (Project Euler 86).
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
# ---------------------------------------------------------------------------
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum of any contiguous subsequence (Kadane's algorithm)."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    # curr is the best sum ending at the current element, ans the best overall
    ans = curr = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        curr = max(num, curr + num)
        ans = max(ans, curr)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
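# Quick check with the classic example: in [-2, 1, -3, 4, -1, 2, 1, -5, 4] the
# best contiguous run is [4, -1, 2, 1] with sum 6.
#
#     assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6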
# ---------------------------------------------------------------------------
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


# NOTE: generic name; the sample's original model-specific class name is not recoverable.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
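# Minimal usage sketch (the class name is the stand-in chosen above; any RGB
# image works, and BaseImageProcessor.__call__ dispatches to preprocess):
#
#     from PIL import Image
#     processor = ImageProcessor()
#     batch = processor(images=Image.open("example.jpg"), return_tensors="pt")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults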
# ---------------------------------------------------------------------------
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP's text encoder does not use token type ids
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
# ---------------------------------------------------------------------------
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    # Sort by the supplied key (e.g. value/weight ratio), best first
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
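# Hedged usage sketch (the menu values are made up for illustration): pick items
# greedily by value/weight ratio under a weight budget of 15. Cheese (ratio 6.25)
# is taken first, bread no longer fits, apple does, for a total value of 60.
#
#     menu = build_menu(["apple", "bread", "cheese"], [10, 30, 50], [5, 10, 8])
#     chosen, total_value = greedy(menu, 15, Things.value_weight)
#     print(chosen, total_value)  # [cheese, apple] 60.0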
# ---------------------------------------------------------------------------
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)


new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]

    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
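# Hedged CLI sketch (the script name follows the transformers convention for
# conversion scripts but is an assumption, as is the output folder):
#
#     python convert_suno_to_hf.py text ./bark-text-converted --is_small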
# ---------------------------------------------------------------------------
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
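# Worked example: the table maps both units to joules (1.0 and 1_000), and the
# conversion multiplies by the source factor and divides by the target factor,
# so 1 joule is 0.001 kilojoule:
#
#     assert energy_conversion("joule", "kilojoule", 1) == 0.001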
# ---------------------------------------------------------------------------
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
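# "Hey" (3 letters) is left alone, while "wollef" and "sroirraw" are longer than
# 4 characters and get reversed, so the demo prints: Hey fellow warriors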
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
UpperCAmelCase_ : Optional[Any] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
    test_set = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
    classifier.fit(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
# training_set.class_indices
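    # predict() returns a sigmoid probability, so threshold at 0.5 rather than
    # comparing the float to exact 0/1 (which could leave `prediction` unset)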
    if result[0][0] < 0.5:
        prediction = '''Normal'''
    else:
        prediction = '''Abnormality detected'''
| 17 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ : Optional[Any] = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __SCREAMING_SNAKE_CASE ( a__ : str=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) )
class lowerCamelCase_ ( _lowercase ):
_lowercase : Optional[int] = None
_lowercase : str = None
def lowerCAmelCase_ ( self : Dict , __A : Optional[int] , __A : Optional[Any] ):
with TemporaryDirectory() as tmp_dir:
__A : List[Any] = dataset_module_factory(__A , cache_dir=__A )
__A : Tuple = import_main_class(dataset_module.module_path , dataset=__A )
__A : DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
__A : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
__A : Union[str, Any] = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : Dict ) -> Optional[Any]:
__A : Optional[Any] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
__A : Union[str, Any] = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : List[Any] = import_main_class(dataset_module.module_path )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__A : Any = None
builder_instance.download_and_prepare()
__A : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> List[str]:
__A : Tuple = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : str = import_main_class(dataset_module.module_path ,dataset=a__ )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
__A : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a__ ,a__ )
assert "train" in ds
assert isinstance(ds["""train"""] ,a__ )
assert next(iter(ds["""train"""] ) )
| 17 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> Union[str, Any]:
random.seed(a__ )
np.random.seed(a__ )
torch.manual_seed(a__ )
torch.cuda.manual_seed_all(a__ )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase_ :
def __init__( self : Optional[int] , __A : Iterable[torch.nn.Parameter] , __A : float = 0.9_9_9_9 , __A : float = 0.0 , __A : int = 0 , __A : bool = False , __A : Union[float, int] = 1.0 , __A : Union[float, int] = 2 / 3 , __A : Optional[Any] = None , __A : Dict[str, Any] = None , **__A : Union[str, Any] , ):
if isinstance(__A , torch.nn.Module ):
__A : Optional[Any] = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , __A , standard_warn=__A , )
__A : List[Any] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__A : List[Any] = True
if kwargs.get("""max_value""" , __A ) is not None:
__A : List[Any] = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , __A , standard_warn=__A )
__A : Any = kwargs["""max_value"""]
if kwargs.get("""min_value""" , __A ) is not None:
__A : List[str] = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , __A , standard_warn=__A )
__A : List[Any] = kwargs["""min_value"""]
__A : Optional[int] = list(__A )
__A : Optional[int] = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , __A ) is not None:
__A : str = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , __A , standard_warn=__A )
self.to(device=kwargs["""device"""] )
__A : Tuple = None
__A : str = decay
__A : List[str] = min_decay
__A : Tuple = update_after_step
__A : Dict = use_ema_warmup
__A : Optional[Any] = inv_gamma
__A : Any = power
__A : Optional[int] = 0
__A : Union[str, Any] = None # set in `step()`
__A : Optional[int] = model_cls
__A : str = model_config
@classmethod
def lowerCAmelCase_ ( cls : Tuple , __A : List[Any] , __A : Optional[Any] ):
__A , __A : str = model_cls.load_config(__A , return_unused_kwargs=__A )
__A : Optional[Any] = model_cls.from_pretrained(__A )
__A : Optional[Any] = cls(model.parameters() , model_cls=__A , model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCAmelCase_ ( self : Optional[int] , __A : Tuple ):
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
__A : List[str] = self.model_cls.from_config(self.model_config )
__A : List[str] = self.state_dict()
state_dict.pop("""shadow_params""" , __A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCAmelCase_ ( self : List[Any] , __A : int ):
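        """
        Compute the EMA decay for the current step: a power-law warmup
        ``1 - (1 + step / inv_gamma) ** -power`` when ``use_ema_warmup`` is
        set, otherwise the classic ``(1 + step) / (10 + step)`` ramp; the
        result is clamped to ``[min_decay, decay]``.
        """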
__A : Tuple = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__A : Any = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__A : str = (1 + step) / (10 + step)
__A : Dict = min(__A , self.decay )
# make sure decay is not smaller than min_decay
__A : Dict = max(__A , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase_ ( self : List[str] , __A : Iterable[torch.nn.Parameter] ):
if isinstance(__A , torch.nn.Module ):
__A : Tuple = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , __A , standard_warn=__A , )
__A : Union[str, Any] = parameters.parameters()
__A : Any = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__A : List[Any] = self.get_decay(self.optimization_step )
__A : Optional[int] = decay
__A : Union[str, Any] = 1 - decay
__A : int = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , __A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__A : List[str] = deepspeed.zero.GatheredParameters(__A , modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCAmelCase_ ( self : str , __A : Iterable[torch.nn.Parameter] ):
__A : Union[str, Any] = list(__A )
for s_param, param in zip(self.shadow_params , __A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase_ ( self : Any , __A : List[Any]=None , __A : List[Any]=None ):
__A : Tuple = [
p.to(device=__A , dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCAmelCase_ ( self : Tuple ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase_ ( self : Union[str, Any] , __A : Iterable[torch.nn.Parameter] ):
__A : List[str] = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase_ ( self : Optional[Any] , __A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , __A ):
param.data.copy_(c_param.data )
# Better memory-wise.
__A : Any = None
def lowerCAmelCase_ ( self : Any , __A : dict ):
__A : Optional[Any] = copy.deepcopy(__A )
__A : Optional[Any] = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
__A : int = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , __A ):
raise ValueError("""Invalid min_decay""" )
__A : List[Any] = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , __A ):
raise ValueError("""Invalid optimization_step""" )
__A : List[Any] = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , __A ):
raise ValueError("""Invalid update_after_step""" )
__A : int = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , __A ):
raise ValueError("""Invalid use_ema_warmup""" )
__A : Any = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
__A : List[Any] = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
__A : Dict = state_dict.get("""shadow_params""" , __A )
if shadow_params is not None:
__A : Union[str, Any] = shadow_params
if not isinstance(self.shadow_params , __A ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(__A , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 17 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
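        """
        Mirror the processor's shortest-edge resize rule to compute the
        expected output height/width: per image when ``batched`` is False,
        otherwise the max over the whole batch.
        """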
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 1 |
import requests
APPID = ''''''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = '''https://api.openweathermap.org/data/2.5/'''
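# Each helper forwards its keyword arguments straight to the API as query
# parameters via ``params=locals()``, so the parameter names must match the
# OpenWeatherMap query keys (q, appid, lat, lon).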
def __SCREAMING_SNAKE_CASE ( a__ : str = "Chicago" ,a__ : str = APPID ) -> dict:
return requests.get(URL_BASE + """weather""" ,params=locals() ).json()
def __SCREAMING_SNAKE_CASE ( a__ : str = "Kolkata, India" ,a__ : str = APPID ) -> dict:
return requests.get(URL_BASE + """forecast""" ,params=locals() ).json()
def __SCREAMING_SNAKE_CASE ( a__ : float = 55.68 ,a__ : float = 12.57 ,a__ : str = APPID ) -> dict:
return requests.get(URL_BASE + """onecall""" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 17 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def main():
    args = parse_args()
# Import training_script as a module.
    script_fpath = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
# Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 17 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowerCamelCase_ :
def __init__( self : Tuple , __A : str = "cpu" , __A : str = "openai/clip-vit-large-patch14" ):
__A : List[str] = device
__A : List[Any] = CLIPTokenizerFast.from_pretrained(__A )
__A : Union[str, Any] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
__A : Dict = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
__A : Optional[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__A : Dict = torchvision.transforms.Resize(224 )
__A : Optional[int] = torchvision.transforms.CenterCrop(224 )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
__A : str = self.resize(__A )
__A : Any = self.center_crop(__A )
__A : Any = self.normalize(__A )
return images
def __call__( self : Union[str, Any] , __A : Any=None , __A : Optional[Any]=None , **__A : Tuple ):
__A : int = self.tokenizer(text=__A , **__A )
__A : List[str] = self.preprocess_img(__A )
__A : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Any , __A : List[str]=10 , __A : Dict=0.0_1 , __A : List[Any]=None , __A : Dict=None , __A : Any=None , __A : List[str]=None , __A : Optional[Any]=None , __A : Optional[Any]=None , __A : Optional[int]=False , __A : Optional[Any]=True , __A : List[Any]="image" , __A : str=True , __A : Optional[Any]=False , __A : Optional[int]=False , __A : Any=False , ):
super().__init__()
__A : List[str] = None
__A : Tuple = device if device else get_device()
if vqgan:
__A : Optional[Any] = vqgan
else:
__A : Optional[Any] = load_vqgan(self.device , conf_path=__A , ckpt_path=__A )
self.vqgan.eval()
if clip:
__A : List[Any] = clip
else:
__A : List[str] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
__A : Optional[int] = ProcessorGradientFlow(device=self.device )
__A : Any = iterations
__A : str = lr
__A : List[Any] = log
__A : Union[str, Any] = make_grid
__A : Union[str, Any] = return_val
__A : Optional[Any] = quantize
__A : str = self.vqgan.decoder.z_shape
def lowerCAmelCase_ ( self : Any , __A : Union[str, Any]=None , __A : Any=None , __A : Tuple=5 , __A : Optional[Any]=True ):
__A : Tuple = []
if output_path is None:
__A : Any = """./animation.gif"""
if input_path is None:
__A : int = self.save_path
__A : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(__A ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(__A ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
__A : Tuple = total_duration / len(__A )
__A : int = [frame_duration] * len(__A )
if extend_frames:
__A : Union[str, Any] = 1.5
__A : Optional[Any] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(__A ) )
imageio.mimsave(__A , __A , duration=__A )
print(F"""gif saved to {output_path}""" )
def lowerCAmelCase_ ( self : List[Any] , __A : List[str]=None , __A : Dict=None ):
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
__A : Optional[int] = preprocess(Image.open(__A ) , target_image_size=256 ).to(self.device )
__A : List[str] = preprocess_vqgan(__A )
__A , *__A : Union[str, Any] = self.vqgan.encode(__A )
return z
def lowerCAmelCase_ ( self : Dict , __A : List[Any] ):
__A : Tuple = self.latent.detach().requires_grad_()
__A : Tuple = base_latent + transform_vector
if self.quantize:
__A , *__A : int = self.vqgan.quantize(__A )
else:
__A : List[Any] = trans_latent
return self.vqgan.decode(__A )
def lowerCAmelCase_ ( self : List[str] , __A : Tuple , __A : int , __A : Union[str, Any]=None ):
__A : int = self.clip_preprocessor(text=__A , images=__A , return_tensors="""pt""" , padding=__A )
__A : Optional[Any] = self.clip(**__A )
__A : Optional[int] = clip_outputs.logits_per_image
if weights is not None:
__A : Dict = similarity_logits * weights
return similarity_logits.sum()
def lowerCAmelCase_ ( self : Tuple , __A : int , __A : Optional[Any] , __A : Union[str, Any] ):
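        # contrastive objective: reward similarity to the positive prompts and
        # penalise similarity to the negative ones (difference of log scores)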
__A : int = self._get_clip_similarity(pos_prompts["""prompts"""] , __A , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
__A : str = self._get_clip_similarity(neg_prompts["""prompts"""] , __A , weights=neg_prompts["""weights"""] )
else:
__A : Any = torch.tensor([1] , device=self.device )
__A : List[Any] = -torch.log(__A ) + torch.log(__A )
return loss
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict , __A : Tuple , __A : Dict ):
__A : Union[str, Any] = torch.randn_like(self.latent , requires_grad=__A , device=self.device )
__A : Tuple = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__A : Optional[Any] = self._add_vector(__A )
__A : List[str] = loop_post_process(__A )
__A : str = self._get_CLIP_loss(__A , __A , __A )
print("""CLIP loss""" , __A )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=__A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCAmelCase_ ( self : List[str] , __A : Dict , __A : Any , __A : Tuple ):
wandb.init(reinit=__A , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
__A : str = Image.open(__A )
__A : Dict = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(__A ) )
def lowerCAmelCase_ ( self : Tuple , __A : Optional[Any] ):
if not prompts:
return []
__A : List[str] = []
__A : str = []
if isinstance(__A , __A ):
__A : List[str] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(__A , (tuple, list) ):
__A : Dict = prompt[0]
__A : List[Any] = float(prompt[1] )
elif ":" in prompt:
__A , __A : Union[str, Any] = prompt.split(""":""" )
__A : Union[str, Any] = float(__A )
else:
__A : Dict = prompt
__A : Optional[Any] = 1.0
processed_prompts.append(__A )
weights.append(__A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__A , device=self.device ),
}
def lowerCAmelCase_ ( self : List[Any] , __A : Union[str, Any] , __A : Optional[Any]=None , __A : Tuple=None , __A : Tuple=True , __A : Union[str, Any]=False , __A : Dict=True , __A : List[Any]=True , __A : Any=None , ):
if image_path:
__A : Optional[int] = self._get_latent(__A )
else:
__A : Union[str, Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__A , __A , __A )
assert pos_prompts, "You must provide at least one positive prompt."
__A : str = self.process_prompts(__A )
__A : Any = self.process_prompts(__A )
if save_final and save_path is None:
__A : List[str] = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(__A ):
os.makedirs(__A )
else:
__A : Tuple = save_path + """_""" + get_timestamp()
os.makedirs(__A )
__A : Any = save_path
__A : Dict = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(__A ) )
__A : List[str] = loop_post_process(__A )
for iter, transformed_img in enumerate(self._optimize_CLIP(__A , __A , __A ) ):
if show_intermediate:
show_pil(__A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(__A )} )
if show_final:
show_pil(__A )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
| 17 |
from collections.abc import Sequence
def evaluate_poly( a__ : Sequence[float] ,x : float ) -> float:
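    """
    Evaluate the polynomial c[0] + c[1]*x + ... + c[n]*x**n directly,
    recomputing the power of x for every term.

    >>> evaluate_poly((1.0, 2.0, 3.0), 2.0)
    17.0
    """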
return sum(c * (x**i) for i, c in enumerate(a__ ) )
def horner( a__ : Sequence[float] ,x : float ) -> float:
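    """
    Evaluate the same polynomial with Horner's rule, folding from the highest
    coefficient: one multiplication and one addition per coefficient.

    >>> horner((1.0, 2.0, 3.0), 2.0)
    17.0
    """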
    result = 0.0
    for coeff in reversed(a__ ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def fetch_jobs( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location ).content ,"""html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" ,attrs={"""data-tn-component""": """organicJob"""} ):
__A : Optional[int] = job.find("""a""" ,attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
__A : str = job.find("""span""" ,{"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 17 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
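        """
        Strip padding from a batch of generated waveforms: the padding mask is
        right-padded with the *non*-padding value (so generated samples are
        kept), then each waveform keeps only the positions whose mask entry is
        not the padding value and is reshaped back to (channels, -1).
        """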
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
| 17 | 1 |
def __SCREAMING_SNAKE_CASE ( number : int ) -> bool:
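    """
    Check parity with a bitwise AND against the lowest bit.

    >>> __SCREAMING_SNAKE_CASE(4)
    True
    >>> __SCREAMING_SNAKE_CASE(7)
    False
    """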
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
def __SCREAMING_SNAKE_CASE ( number : int ) -> int:
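    """
    Return the 1-indexed position of the most significant set bit of
    ``number`` (its bit length); 0 maps to 0.

    >>> __SCREAMING_SNAKE_CASE(0)
    0
    >>> __SCREAMING_SNAKE_CASE(1)
    1
    >>> __SCREAMING_SNAKE_CASE(8)
    4
    """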
    if not isinstance(number ,int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
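# Single-bit helpers: each takes an integer and a 0-indexed bit position.
# In order: OR the mask in (set), AND with the inverted mask (clear), XOR the
# mask (flip), then read the bit back as a bool and as an int.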
def __SCREAMING_SNAKE_CASE ( number : int ,position : int ) -> int:
    return number | (1 << position)
def __SCREAMING_SNAKE_CASE ( number : int ,position : int ) -> int:
    return number & ~(1 << position)
def __SCREAMING_SNAKE_CASE ( number : int ,position : int ) -> int:
    return number ^ (1 << position)
def __SCREAMING_SNAKE_CASE ( number : int ,position : int ) -> bool:
    return ((number >> position) & 1) == 1
def __SCREAMING_SNAKE_CASE ( number : int ,position : int ) -> int:
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate( days : int ,absent : int ,late : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 ,absent ,late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1 ,absent + 1 ,0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1 ,absent ,0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution( days : int = 30 ) -> int:
    return _calculate(days ,absent=0 ,late=0 )
if __name__ == "__main__":
print(solution())
| 17 | 1 |
def prefix_function( input_string : str ) -> list:
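    """
    KMP failure function: entry ``i`` is the length of the longest proper
    prefix of ``input_string[: i + 1]`` that is also a suffix of it.

    >>> prefix_function('''aabcdaabc''')
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """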
    prefix_result = [0] * len(input_string )
    for i in range(1 ,len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> int:
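    """
    The longest prefix that also occurs as a suffix is the maximum of the
    prefix function.

    >>> __SCREAMING_SNAKE_CASE('''aabcdaabc''')
    4
    """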
return max(prefix_function(a__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , __A : Any , __A : Optional[Any] ):
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : str ):
__A : Any = flow_network
__A : int = flow_network.verticesCount
__A : List[Any] = flow_network.sourceIndex
__A : Union[str, Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__A : Optional[int] = flow_network.graph
__A : str = False
def lowerCAmelCase_ ( self : List[Any] ):
if not self.executed:
self._algorithm()
__A : Any = True
def lowerCAmelCase_ ( self : List[str] ):
pass
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Any , __A : List[str] ):
super().__init__(__A )
# use this to save your result
__A : str = -1
def lowerCAmelCase_ ( self : Any ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , __A : Dict ):
super().__init__(__A )
__A : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
__A : Optional[Any] = [0] * self.verticies_count
__A : Union[str, Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__A : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__A : Dict = 0
while i < len(__A ):
__A : List[Any] = vertices_list[i]
__A : Optional[Any] = self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
__A : Any = 0
else:
i += 1
__A : Optional[int] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def lowerCAmelCase_ ( self : Dict , __A : List[str] , __A : Optional[Any] ):
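        # push min(excess, residual capacity) of flow along the admissible edge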
__A : Union[str, Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
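        # relabel: lift the vertex to one above its lowest admissible neighbour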
__A : Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__A : Dict = self.heights[to_index]
if min_height is not None:
__A : Optional[int] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
UpperCAmelCase_ : str = '''bert-base-cased'''
UpperCAmelCase_ : str = '''google/pegasus-xsum'''
UpperCAmelCase_ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
UpperCAmelCase_ : Optional[Any] = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
UpperCAmelCase_ : List[Any] = '''patrickvonplaten/t5-tiny-random'''
UpperCAmelCase_ : str = '''sshleifer/bart-tiny-random'''
UpperCAmelCase_ : Optional[int] = '''sshleifer/tiny-mbart'''
UpperCAmelCase_ : Optional[int] = '''sshleifer/tiny-marian-en-de'''
def __SCREAMING_SNAKE_CASE ( a__ : Path ,a__ : list ) -> List[Any]:
__A : Optional[int] = """\n""".join(a__ )
Path(a__ ).open("""w""" ).writelines(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, Any] ) -> List[Any]:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(a__ ,f"""{split}.source""" ) ,a__ )
_dump_articles(os.path.join(a__ ,f"""{split}.target""" ) ,a__ )
return tmp_dir
class lowerCamelCase_ ( _lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCAmelCase_ ( self : List[Any] , __A : Any ):
__A : List[Any] = AutoTokenizer.from_pretrained(__A )
__A : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__A : Optional[int] = max(len(tokenizer.encode(__A ) ) for a in ARTICLES )
__A : List[str] = max(len(tokenizer.encode(__A ) ) for a in SUMMARIES )
__A : Dict = 4
__A : Optional[int] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__A , __A : Dict = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
__A : Tuple = SeqaSeqDataset(
__A , data_dir=__A , type_path="""train""" , max_source_length=__A , max_target_length=__A , src_lang=__A , tgt_lang=__A , )
__A : List[str] = DataLoader(__A , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__A , __A )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__A : Any = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] ):
__A : str = AutoTokenizer.from_pretrained(__A )
__A : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__A : List[str] = max(len(tokenizer.encode(__A ) ) for a in ARTICLES )
__A : Optional[int] = max(len(tokenizer.encode(__A ) ) for a in SUMMARIES )
__A : Union[str, Any] = 4
__A : Optional[Any] = LegacySeqaSeqDataset(
__A , data_dir=__A , type_path="""train""" , max_source_length=20 , max_target_length=__A , )
__A : List[str] = DataLoader(__A , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCAmelCase_ ( self : List[Any] ):
__A : Tuple = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
__A : Tuple = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__A : Tuple = tmp_dir.joinpath("""train.source""" ).open().readlines()
__A : Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__A , __A , 128 , __A )
__A : Dict = {x.name for x in tmp_dir.iterdir()}
__A : int = {x.name for x in save_dir.iterdir()}
__A : Any = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__A ) < len(__A )
assert len(__A ) == 1
assert len(packed_examples[0] ) == sum(len(__A ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def lowerCAmelCase_ ( self : Optional[int] ):
if not FAIRSEQ_AVAILABLE:
return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 17 |
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Return the merged term if the two strings differ in at most one
    position (the differing position becomes "_"), otherwise False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one row marks an essential prime implicant
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        # greedily pick the implicant that covers the most remaining minterms
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 17 | 1 |
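A quick check of the merge rule used by the Quine-McCluskey sample above: two minterm strings combine only when they differ in exactly one position, and that position is replaced by "_". A minimal sketch against the compare_string helper as restored above (values chosen purely for illustration):

    # 100 and 110 differ only in the middle bit, so they merge into 1_0.
    assert compare_string("100", "110") == "1_0"
    # 011 and 100 differ in every bit, so no merge is possible.
    assert compare_string("011", "100") is False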
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_ :
def __init__( self : str , __A : Tuple , __A : Union[str, Any]=13 , __A : Union[str, Any]=30 , __A : Optional[int]=2 , __A : Optional[Any]=3 , __A : Optional[Any]=True , __A : str=True , __A : Dict=32 , __A : List[Any]=2 , __A : Dict=4 , __A : str=37 , __A : Union[str, Any]="gelu" , __A : Optional[Any]=0.1 , __A : Dict=0.1 , __A : List[str]=10 , __A : Optional[Any]=0.0_2 , __A : Optional[int]=3 , __A : Tuple=0.6 , __A : Any=None , ):
__A : int = parent
__A : str = batch_size
__A : List[Any] = image_size
__A : Optional[int] = patch_size
__A : int = num_channels
__A : Tuple = is_training
__A : Optional[int] = use_labels
__A : Union[str, Any] = hidden_size
__A : Dict = num_hidden_layers
__A : Dict = num_attention_heads
__A : int = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Dict = hidden_dropout_prob
__A : List[str] = attention_probs_dropout_prob
__A : Union[str, Any] = type_sequence_label_size
__A : str = initializer_range
__A : Any = mask_ratio
__A : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__A : str = (image_size // patch_size) ** 2
__A : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Union[str, Any] = None
if self.use_labels:
__A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase_ ( self : List[Any] , __A : Tuple , __A : int , __A : Any ):
__A : int = TFViTMAEModel(config=__A )
__A : List[str] = model(__A , training=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Dict , __A : str , __A : Union[str, Any] , __A : Union[str, Any] ):
__A : Optional[Any] = TFViTMAEForPreTraining(__A )
__A : Optional[Any] = model(__A , training=__A )
# expected sequence length = num_patches
__A : Any = (self.image_size // self.patch_size) ** 2
__A : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__A : Tuple = 1
__A : Optional[int] = TFViTMAEForPreTraining(__A )
__A : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : List[str] = model(__A , training=__A )
__A : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : int = self.prepare_config_and_inputs()
((__A) , (__A) , (__A)) : int = config_and_inputs
__A : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
_lowercase : Any = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : Optional[Any] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
_lowercase : Tuple = False
_lowercase : List[str] = False
def lowerCAmelCase_ ( self : Tuple ):
__A : Optional[Any] = TFViTMAEModelTester(self )
__A : Optional[Any] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def lowerCAmelCase_ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : str ):
__A , __A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[int] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Layer ) )
def lowerCAmelCase_ ( self : Any ):
__A , __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(__A )
__A : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Any = [*signature.parameters.keys()]
__A : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def lowerCAmelCase_ ( self : str ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def lowerCAmelCase_ ( self : Tuple ):
# make the mask reproducible
np.random.seed(2 )
__A , __A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
__A : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__A : str = model_class(__A )
__A : int = self._prepare_for_class(__A , __A )
__A : str = model(__A , noise=__A )
__A : Optional[int] = copy.deepcopy(self._prepare_for_class(__A , __A ) )
__A : Optional[Any] = model(**__A , noise=__A )
__A : int = outputs_dict[0].numpy()
__A : Union[str, Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def lowerCAmelCase_ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
__A , __A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = int((config.image_size // config.patch_size) ** 2 )
__A : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
__A : Union[str, Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
__A : Dict = v.numpy()
else:
__A : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
__A : Dict = model_class(__A )
__A : Optional[Any] = self._prepare_for_class(__A , __A )
__A : Union[str, Any] = prepare_numpy_arrays(__A )
__A : List[str] = model(__A , noise=__A )
__A : Union[str, Any] = model(**__A , noise=__A )
self.assert_outputs_same(__A , __A )
def lowerCAmelCase_ ( self : Dict , __A : Union[str, Any] , __A : Union[str, Any] , __A : Dict ):
# make masks reproducible
np.random.seed(2 )
__A : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__A : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__A : Optional[Any] = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__A : List[Any] = tf_noise
super().check_pt_tf_models(__A , __A , __A )
def lowerCAmelCase_ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(__A , __A ),)
if isinstance(__A , __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A , """_keras_serializable""" , __A )
}
__A : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
__A : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__A : List[str] = tf.convert_to_tensor(__A )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
__A : Union[str, Any] = main_layer_class(__A )
__A : Tuple = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__A : List[str] = tf.keras.Model(__A , outputs=main_layer(__A ) )
__A : Union[str, Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
__A : List[str] = os.path.join(__A , """keras_model.h5""" )
model.save(__A )
__A : List[Any] = tf.keras.models.load_model(
__A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A , tf.keras.Model )
__A : str = model(__A )
self.assert_outputs_same(__A , __A )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
# make mask reproducible
np.random.seed(2 )
__A , __A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = int((config.image_size // config.patch_size) ** 2 )
__A : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__A : Any = model_class(__A )
__A : Dict = self._prepare_for_class(__A , __A )
__A : Optional[Any] = model(__A , noise=__A )
if model_class.__name__ == "TFViTMAEModel":
__A : List[str] = outputs.last_hidden_state.numpy()
__A : Optional[Any] = 0
else:
__A : List[str] = outputs.logits.numpy()
__A : List[str] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A , saved_model=__A )
__A : List[str] = model_class.from_pretrained(__A )
__A : str = model(__A , noise=__A )
if model_class.__name__ == "TFViTMAEModel":
__A : Dict = after_outputs["""last_hidden_state"""].numpy()
__A : List[str] = 0
else:
__A : Optional[int] = after_outputs["""logits"""].numpy()
__A : Dict = 0
__A : Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A , 1e-5 )
def lowerCAmelCase_ ( self : Union[str, Any] ):
# make mask reproducible
np.random.seed(2 )
__A , __A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = int((config.image_size // config.patch_size) ** 2 )
__A : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__A : Tuple = model_class(__A )
__A : Optional[int] = self._prepare_for_class(__A , __A )
__A : Tuple = model(__A , noise=__A )
__A : int = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
__A : Dict = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__A : Tuple = model_class.from_config(model.config )
__A : Union[str, Any] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
__A : Optional[int] = new_model(__A , noise=__A )
self.assert_outputs_same(__A , __A )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCAmelCase_ ( self : int ):
pass
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Dict = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(__A )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__A : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : int ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self : int ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__A : List[Any] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
__A : List[Any] = self.default_image_processor
__A : Union[str, Any] = prepare_img()
__A : Union[str, Any] = image_processor(images=__A , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__A : Dict = ViTMAEConfig()
__A : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__A : List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
__A : int = model(**__A , noise=__A )
# verify the logits
__A : Optional[Any] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , __A )
__A : Optional[Any] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __A , atol=1e-4 )
| 17 |
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail (binary search)
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 17 | 1 |
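The tail-array bookkeeping above is the O(n log n) patience-sorting idea; with the standard library it is usually written via bisect. A minimal standalone sketch (the function name is mine, not part of the original file):

    from bisect import bisect_left

    def lis_length(v: list[int]) -> int:
        """Length of the longest strictly increasing subsequence in O(n log n)."""
        tails: list[int] = []
        for x in v:
            i = bisect_left(tails, x)
            if i == len(tails):
                tails.append(x)   # x extends the longest subsequence found so far
            else:
                tails[i] = x      # x is a smaller tail for subsequences of length i + 1
        return len(tails)

    assert lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6  # e.g. 2 3 7 8 10 13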
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 17 |
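The sorted-merge approach above costs O((m+n) log(m+n)); a two-pointer merge would get O(m+n). Two quick sanity checks against the function as restored above:

    assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
    assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5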
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 1 |
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : List[str] = '''docs/source/en/_toctree.yml'''
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> Union[str, Any]:
__A : Union[str, Any] = defaultdict(a__ )
__A : Dict = []
__A : Dict = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(a__ )
__A : Optional[int] = new_doc_list
__A : Optional[int] = [key for key, value in counts.items() if value > 1]
__A : Tuple = []
for duplicate_key in duplicates:
__A : Union[str, Any] = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(a__ ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
__A : str = sorted(a__ ,key=lambda a__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(a__ ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(a__ )
# Sort
return overview_doc
def __SCREAMING_SNAKE_CASE ( a__ : Tuple=False ) -> List[str]:
with open(a__ ,encoding="""utf-8""" ) as f:
__A : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
__A : List[str] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__A : Tuple = content[api_idx]["""sections"""]
# Then to the model doc
__A : List[Any] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__A : List[Any] = api_doc[scheduler_idx]["""sections"""]
__A : Tuple = clean_doc_toc(a__ )
__A : Tuple = False
if new_scheduler_doc != scheduler_doc:
__A : List[str] = True
if overwrite:
__A : List[str] = new_scheduler_doc
if diff:
if overwrite:
__A : Optional[int] = api_doc
with open(a__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(yaml.dump(a__ ,allow_unicode=a__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def __SCREAMING_SNAKE_CASE ( a__ : Any=False ) -> Any:
with open(a__ ,encoding="""utf-8""" ) as f:
__A : Union[str, Any] = yaml.safe_load(f.read() )
# Get to the API doc
__A : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__A : Dict = content[api_idx]["""sections"""]
# Then to the model doc
__A : Dict = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__A : Optional[Any] = False
__A : List[str] = api_doc[pipeline_idx]["""sections"""]
__A : Optional[int] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__A : List[Any] = pipeline_doc["""section"""]
__A : int = clean_doc_toc(a__ )
if overwrite:
__A : Dict = new_sub_pipeline_doc
new_pipeline_docs.append(a__ )
# sort overall pipeline doc
__A : Tuple = clean_doc_toc(a__ )
if new_pipeline_docs != pipeline_docs:
__A : List[str] = True
if overwrite:
__A : List[Any] = new_pipeline_docs
if diff:
if overwrite:
__A : Optional[Any] = api_doc
with open(a__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(yaml.dump(a__ ,allow_unicode=a__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase_ : Tuple = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 17 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 1 |
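For orientation, the closing property of the config above returns the product of the convolutional strides, i.e. the feature extractor's overall downsampling factor (it is named inputs_to_logits_ratio in the upstream transformers config). With the default conv_stride=(5, 2, 2, 2, 2, 2, 2) that is 5 * 2^6 = 320 input samples per extracted frame, or one frame every 20 ms of 16 kHz audio:

    import functools
    import operator

    conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default from the signature above
    assert functools.reduce(operator.mul, conv_stride, 1) == 320
    assert 320 / 16_000 == 0.02  # seconds of audio per extracted frame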
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # mean-pool the token embeddings over non-padding positions
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
| 17 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCamelCase_ :
_lowercase : CommonSchedulerState
# setable values
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
_lowercase : Optional[int] = None
@classmethod
def lowerCAmelCase_ ( cls : Any , __A : CommonSchedulerState , __A : jnp.ndarray , __A : jnp.ndarray ):
return cls(common=__A , init_noise_sigma=__A , timesteps=__A )
@dataclass
class lowerCamelCase_ ( _lowercase ):
_lowercase : DDPMSchedulerState
class lowerCamelCase_ ( _lowercase , _lowercase ):
_lowercase : Optional[int] = [e.name for e in FlaxKarrasDiffusionSchedulers]
_lowercase : jnp.dtype
@property
def lowerCAmelCase_ ( self : Tuple ):
return True
@register_to_config
def __init__( self : str , __A : int = 1000 , __A : float = 0.0_0_0_1 , __A : float = 0.0_2 , __A : str = "linear" , __A : Optional[jnp.ndarray] = None , __A : str = "fixed_small" , __A : bool = True , __A : str = "epsilon" , __A : jnp.dtype = jnp.floataa , ):
__A : Dict = dtype
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[CommonSchedulerState] = None ):
if common is None:
__A : List[str] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__A : Any = jnp.array(1.0 , dtype=self.dtype )
__A : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__A , init_noise_sigma=__A , timesteps=__A , )
def lowerCAmelCase_ ( self : Any , __A : DDPMSchedulerState , __A : jnp.ndarray , __A : Optional[int] = None ):
return sample
def lowerCAmelCase_ ( self : Dict , __A : DDPMSchedulerState , __A : int , __A : Tuple = () ):
__A : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__A : Dict = (jnp.arange(0 , __A ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__A , timesteps=__A , )
def lowerCAmelCase_ ( self : Optional[int] , __A : DDPMSchedulerState , __A : List[str] , __A : Union[str, Any]=None , __A : List[str]=None ):
__A : Dict = state.common.alphas_cumprod[t]
__A : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__A : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__A : int = jnp.clip(__A , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__A : Union[str, Any] = jnp.log(jnp.clip(__A , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
__A : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__A : Union[str, Any] = variance
__A : str = state.common.betas[t]
__A : Optional[int] = (predicted_variance + 1) / 2
__A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase_ ( self : List[str] , __A : DDPMSchedulerState , __A : jnp.ndarray , __A : int , __A : jnp.ndarray , __A : Optional[jax.random.KeyArray] = None , __A : bool = True , ):
__A : Union[str, Any] = timestep
if key is None:
__A : str = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__A , __A : Dict = jnp.split(__A , sample.shape[1] , axis=1 )
else:
__A : Optional[int] = None
# 1. compute alphas, betas
__A : Union[str, Any] = state.common.alphas_cumprod[t]
__A : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__A : str = 1 - alpha_prod_t
__A : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__A : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__A : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
__A : List[str] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__A : Any = jnp.clip(__A , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : Tuple = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__A : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__A : Dict = jax.random.split(__A , num=1 )
__A : Dict = jax.random.normal(__A , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__A , __A , predicted_variance=__A ) ** 0.5) * noise
__A : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__A : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__A , state=__A )
def lowerCAmelCase_ ( self : Dict , __A : DDPMSchedulerState , __A : jnp.ndarray , __A : jnp.ndarray , __A : jnp.ndarray , ):
return add_noise_common(state.common , __A , __A , __A )
def lowerCAmelCase_ ( self : Optional[int] , __A : DDPMSchedulerState , __A : jnp.ndarray , __A : jnp.ndarray , __A : jnp.ndarray , ):
return get_velocity_common(state.common , __A , __A , __A )
def __len__( self : Any ):
return self.config.num_train_timesteps
| 17 |
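For reference, the two coefficients assembled in the scheduler's step function above are exactly the DDPM posterior mean, formula (7) of Ho et al. 2020 (arXiv:2006.11239), and the variance branch starts from the matching posterior variance:

    \tilde{\mu}_t(x_t, x_0)
        = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1 - \bar{\alpha}_t}\, x_0
        + \frac{\sqrt{\alpha_t}\,(1 - \bar{\alpha}_{t-1})}{1 - \bar{\alpha}_t}\, x_t,
    \qquad
    \tilde{\beta}_t = \frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}\,\beta_t

with alpha_prod_t playing the role of \bar{\alpha}_t and beta_prod_t of 1 - \bar{\alpha}_t.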
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 1 |
def longest_common_subsequence(x: str, y: str):
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # walk the table backwards to reconstruct one witness subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 17 |
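The table fill in the LCS sample above folds the usual two-case recurrence into a single max over three terms, which is equivalent:

    L[i][j] = \max\bigl(L[i-1][j],\; L[i][j-1],\; L[i-1][j-1] + [x_i = y_j]\bigr)

where [\cdot] is the Iverson bracket. For the demo strings "AGGTAB" and "GXTXAYB" this yields length 4, with "GTAB" as the reconstructed witness.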
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 1 |
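The triple loop in the sample above is the Floyd-Warshall relaxation, O(n^3) time over an O(n^2) table:

    d_{ij}^{(k)} = \min\bigl(d_{ij}^{(k-1)},\; d_{ik}^{(k-1)} + d_{kj}^{(k-1)}\bigr)

One caveat worth noting: the table is initialised to infinity everywhere, including the diagonal, so show_min(i, i) only becomes finite when some cycle through i gets relaxed; zeroing the diagonal first would give the conventional all-pairs convention d(i, i) = 0.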
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 17 |
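Both table fills above implement the classic 0/1 knapsack recurrence:

    dp[i][w] =
    \begin{cases}
        dp[i-1][w] & \text{if } wt_i > w,\\
        \max\bigl(dp[i-1][w],\; val_i + dp[i-1][w - wt_i]\bigr) & \text{otherwise.}
    \end{cases}

For the demo instance (weights [4, 3, 2, 3], values [3, 2, 4, 4], capacity 6) the optimum packs items 3 and 4 for a value of 8, which is what the asserts check.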
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` from ``starting_point`` onwards by the
    (multiplicity-aware) Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 1 |
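The update in the loop above is the multiplicity-modified Newton step

    x_{n+1} = x_n - m\,\frac{f(x_n)}{f'(x_n)}

with m the multiplicity argument: at a root of multiplicity m > 1 the plain step (m = 1) only converges linearly, while the modified step restores quadratic convergence.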
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : Optional[Any] ) -> Union[str, Any]:
# Load checkpoint
__A : Union[str, Any] = torch.load(a__ ,map_location="""cpu""" )
__A : Optional[Any] = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
__A : Optional[int] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__A : Tuple = v
else:
__A : Dict = v
__A : Optional[Any] = chkpt["""params"""]
__A : Tuple = {n: v for n, v in config.items() if not isinstance(a__ ,(torch.FloatTensor, numpy.ndarray) )}
__A : List[Any] = chkpt["""dico_word2id"""]
__A : Tuple = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" ,"""""" ): i for s, i in vocab.items()}
# Save pytorch-model
__A : Any = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__A : Union[str, Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
__A : Optional[Any] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(a__ ,a__ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(a__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(a__ ,indent=2 ) + """\n""" )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(a__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(a__ ,indent=2 ) + """\n""" )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : int = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 17 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 1 |
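The integrality test in the loop comes from unfolding the cuboid: for an a x b x c cuboid with longest side M and s = a + b, the shortest surface path between opposite corners is

    \ell = \sqrt{s^2 + M^2}, \qquad 1 \le a \le b \le M,\; s = a + b,

so the solution fixes M (max_cuboid_size), scans each s (sum_shortest_sides), and whenever \ell is an integer the min/max expression counts the number of valid (a, b) splits of s.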
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
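All three branches above solve the same idealised parallel-plate Casimir relation for whichever quantity was passed as zero:

    F = \frac{\hbar c \pi^2 A}{240\, d^4}

with \hbar the reduced Planck constant, c the speed of light, A the plate area and d the plate separation.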
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 1 |
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # a negative max_weight is rejected by calc_profit
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # any negative weight entry is rejected
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # any negative profit entry is rejected
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # max_weight = 0 is rejected as well
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # profit and weight lists of different lengths are rejected
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 17 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
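
    # Hedged demo (the food/value/weight data below is illustrative, not from
    # the original file): pick items by highest value under a 100-unit budget.
    food = ["Burger", "Pizza", "Coca Cola", "Rice"]
    value = [80, 100, 60, 70]
    weight = [40, 60, 40, 70]
    foods = build_menu(food, value, weight)
    print(greedy(foods, 100, Things.get_value))
    # -> ([Things(Pizza, 100, 60), Things(Burger, 80, 40)], 180.0)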
| 17 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
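

# Hedged note: `p107_network.txt` is the Project Euler 107 input -- a
# comma-separated adjacency matrix with "-" marking absent edges. The answer is
# the total edge weight saved by keeping only the minimum spanning tree.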
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 |
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
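
    # Hedged sanity checks derived from the conversion table above:
    print(energy_conversion("joule", "kilojoule", 1000))  # 1.0
    print(energy_conversion("kilowatthour", "joule", 1.0))  # 3600000.0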
| 17 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
| 17 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
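
# Hedged note on the pattern above: assigning a `_LazyModule` into `sys.modules`
# defers the heavy framework imports declared in `_import_structure` until one
# of those attributes is actually accessed.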
| 17 | 1 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
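

# Hedged examples of the normalized escape time returned above:
#   get_distance(0.0, 0.0, 50) -> 1.0  (the origin never diverges)
#   get_distance(3.0, 3.0, 50) -> 0.0  (diverges on the very first step)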
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 17 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 17 | 1 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
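

# Hedged worked example for the padding rule above: with pad_size=8, a 510x510
# input is padded to 512x512, since (510 // 8 + 1) * 8 - 510 == 2 extra rows and
# columns are mirrored onto the bottom/right edges.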
| 17 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
def __init__( self : Any , __A : Any , __A : Optional[Any]=3 , __A : List[str]=32 , __A : Tuple=3 , __A : Optional[Any]=10 , __A : Optional[Any]=[8, 16, 32, 64] , __A : int=[1, 1, 2, 1] , __A : Any=True , __A : int=True , __A : Tuple="relu" , __A : Tuple=3 , __A : Dict=None , __A : Optional[int]=["stage2", "stage3", "stage4"] , __A : Dict=[2, 3, 4] , __A : int=1 , ):
__A : Any = parent
__A : str = batch_size
__A : Any = image_size
__A : Any = num_channels
__A : List[str] = embeddings_size
__A : Optional[Any] = hidden_sizes
__A : List[Any] = depths
__A : Dict = is_training
__A : Dict = use_labels
__A : Optional[Any] = hidden_act
__A : Union[str, Any] = num_labels
__A : Optional[int] = scope
__A : Union[str, Any] = len(__A )
__A : Dict = out_features
__A : str = out_indices
__A : int = num_groups
def lowerCAmelCase_ ( self : List[Any] ):
__A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : int = None
if self.use_labels:
__A : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__A : List[Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : int ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCAmelCase_ ( self : str , __A : str , __A : Optional[Any] , __A : str ):
__A : List[str] = BitModel(config=__A )
model.to(__A )
model.eval()
__A : str = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase_ ( self : Any , __A : str , __A : List[Any] , __A : Any ):
__A : Dict = self.num_labels
__A : Optional[Any] = BitForImageClassification(__A )
model.to(__A )
model.eval()
__A : Any = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[Any] , __A : Optional[Any] , __A : List[Any] ):
__A : List[Any] = BitBackbone(config=__A )
model.to(__A )
model.eval()
__A : Tuple = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__A : Union[str, Any] = None
__A : List[str] = BitBackbone(config=__A )
model.to(__A )
model.eval()
__A : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
_lowercase : Tuple = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowercase : List[Any] = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
_lowercase : int = False
_lowercase : str = False
_lowercase : List[str] = False
_lowercase : List[str] = False
_lowercase : Any = False
def lowerCAmelCase_ ( self : str ):
__A : Any = BitModelTester(self )
__A : List[Any] = ConfigTester(self , config_class=__A , has_text_modality=__A )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self : Tuple ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def lowerCAmelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : int ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A , __A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[int] = model_class(__A )
__A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Optional[int] = [*signature.parameters.keys()]
__A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase_ ( self : str ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCAmelCase_ ( self : int ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(config=__A )
for name, module in model.named_modules():
if isinstance(__A , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowerCAmelCase_ ( self : Dict ):
def check_hidden_states_output(__A : Union[str, Any] , __A : List[Any] , __A : List[Any] ):
__A : Any = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__A : Dict = model(**self._prepare_for_class(__A , __A ) )
__A : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__A : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__A ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__A , __A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__A : int = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__A : List[Any] = layer_type
__A : List[Any] = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : List[Any] = True
check_hidden_states_output(__A , __A , __A )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowerCAmelCase_ ( self : Any ):
pass
def lowerCAmelCase_ ( self : Tuple ):
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = BitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __SCREAMING_SNAKE_CASE ( ) -> int:
__A : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCAmelCase_ ( self : List[str] ):
__A : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__A )
__A : Optional[int] = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=__A , return_tensors="""pt""" ).to(__A )
# forward pass
with torch.no_grad():
__A : Union[str, Any] = model(**__A )
# verify the logits
__A : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
__A : Tuple = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
@require_torch
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = (BitBackbone,) if is_torch_available() else ()
_lowercase : Dict = BitConfig
_lowercase : Tuple = False
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Dict = BitModelTester(self )
| 17 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
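

# Hedged usage sketch (script name and flags are illustrative):
#   python xla_spawn.py --num_cores 8 your_training_script.py --learning_rate 3e-5
# Everything after the training script path is forwarded to it, plus an extra
# `--tpu_num_cores` argument synthesized from `--num_cores`.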
if __name__ == "__main__":
main()
| 17 | 1 |
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
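

# Hedged note: `num.txt` is expected to hold one integer per line (this appears
# to be Project Euler problem 13, which ships one hundred 50-digit numbers);
# only the first ten digits of the sum are returned, e.g. a file containing
# "123\n456\n" would make solution() return "579".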
if __name__ == "__main__":
print(solution())
| 17 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
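
    # Hedged sanity check: both evaluation strategies should agree up to
    # floating-point rounding; the exact value here is
    # 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6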
| 17 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
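
    # Hedged usage sketch (the checkpoint repo comes from the --checkpoint-repo
    # help text above; the script filename is an assumption):
    #   python convert_roberta_prelayernorm_checkpoint.py \
    #       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
    #       --pytorch_dump_folder_path ./roberta_prelayernorm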
| 17 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
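

# Hedged shape sketch for `_decode_audio` above: given audio_values of shape
# (batch, channels, seq_len) and a shorter padding mask, the mask is right-padded
# with the *non*-padding token so that generated samples are kept, then each
# batch item is sliced back to its true length and reshaped to (channels, -1).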
| 17 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : Optional[int] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = XLNetTokenizer
_lowercase : str = XLNetTokenizerFast
_lowercase : str = True
_lowercase : List[Any] = True
def lowerCAmelCase_ ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
__A : int = XLNetTokenizer(__A , keep_accents=__A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(__A ) , 1006 )
def lowerCAmelCase_ ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase_ ( self : str ):
__A : Any = XLNetTokenizer(__A , keep_accents=__A )
__A : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] )
__A : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__A : List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__A : str = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : Union[str, Any] = XLNetTokenizer(__A , do_lower_case=__A )
__A : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def lowerCAmelCase_ ( self : int ):
__A : Optional[Any] = XLNetTokenizer(__A , do_lower_case=__A )
__A : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def lowerCAmelCase_ ( self : str ):
__A : Optional[int] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
__A : Any = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
__A : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
__A : List[str] = tokenizer.build_inputs_with_special_tokens(__A )
__A : Any = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
# fmt: off
__A : Dict = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 17 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
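
    # Hedged examples: the return value is the 1-based index of the highest set
    # bit, i.e. the bit length of the input.
    print(get_highest_set_bit_position(1))  # 0b1     -> 1
    print(get_highest_set_bit_position(25))  # 0b11001 -> 5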
| 17 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
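

# Hedged usage sketch (VanModel is assumed from the "van" model type):
#   from transformers import VanConfig, VanModel
#   config = VanConfig()
#   model = VanModel(config)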
| 17 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
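
The recursion above is Project Euler 191: count attendance strings that never reach two absences in total nor three consecutive late days. A self-contained sketch that swaps the hand-rolled cache for functools.lru_cache (naming is mine; the row keeps the original _calculate/solution split):

from functools import lru_cache

@lru_cache(maxsize=None)
def prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    if absent == 2 or late == 3:                   # string already disqualified
        return 0
    if days == 0:                                  # survived every day
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)   # absent today
        + prize_strings(days - 1, absent, 0)       # on time today
    )

print(prize_strings(30))  # 1918080, the published Euler 191 total
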
| 17 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
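
In _decode_audio above, the shape unpack was flattened into three writes to the same placeholder (__A , __A , __A = audio_values.shape), so bsz/channels/seq_len are lost. A standalone sketch of the padding-trim step with explicit names (mine, not the library's), assuming the generated audio is at least as long as the mask:

import numpy as np

def trim_padded_audio(audio_values: np.ndarray, padding_mask: np.ndarray,
                      padding_value: float = 0.0) -> list[np.ndarray]:
    # audio_values: (batch, channels, seq_len); padding_mask: (batch, mask_len),
    # where entries equal to padding_value mark padded timesteps.
    bsz, channels, seq_len = audio_values.shape
    # Grow the mask with non-padding values so audio generated beyond the
    # original mask length is kept rather than discarded.
    difference = seq_len - padding_mask.shape[-1]
    mask = np.pad(padding_mask, ((0, 0), (0, difference)),
                  "constant", constant_values=1 - padding_value)
    trimmed = []
    for i in range(bsz):
        keep = mask[i] != padding_value            # (seq_len,) boolean
        trimmed.append(audio_values[i][:, keep])   # drop padded steps in all channels
    return trimmed
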
| 17 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , __A : Any , __A : Optional[Any] ):
if sources is int:
__A : Dict = [sources]
if sinks is int:
__A : Optional[int] = [sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
        # make a fake source/sink vertex if there is
        # more than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : str ):
__A : Any = flow_network
__A : int = flow_network.verticesCount
__A : List[Any] = flow_network.sourceIndex
__A : Union[str, Any] = flow_network.sinkIndex
        # this is just a reference, so you shouldn't change
        # it in your algorithms; use a deep copy before doing that
__A : Optional[int] = flow_network.graph
__A : str = False
def lowerCAmelCase_ ( self : List[Any] ):
if not self.executed:
self._algorithm()
__A : Any = True
def lowerCAmelCase_ ( self : List[str] ):
pass
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Any , __A : List[str] ):
super().__init__(__A )
# use this to save your result
__A : str = -1
def lowerCAmelCase_ ( self : Any ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , __A : Dict ):
super().__init__(__A )
__A : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
__A : Optional[Any] = [0] * self.verticies_count
__A : Union[str, Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__A : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__A : Dict = 0
while i < len(__A ):
__A : List[Any] = vertices_list[i]
__A : Optional[Any] = self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
__A : Any = 0
else:
i += 1
__A : Optional[int] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def lowerCAmelCase_ ( self : Dict , __A : List[str] , __A : Optional[Any] ):
__A : Union[str, Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__A : Dict = self.heights[to_index]
if min_height is not None:
__A : Optional[int] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : str = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str | Literal[False]:
__A : Tuple = list(a__ )
__A : Optional[int] = list(a__ )
__A : int = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count += 1
__A : int = """_"""
if count > 1:
return False
else:
return "".join(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ) -> list[str]:
__A : Optional[Any] = []
while True:
__A : Tuple = ["""$"""] * len(a__ )
__A : Union[str, Any] = []
for i in range(len(a__ ) ):
for j in range(i + 1 ,len(a__ ) ):
__A : int = compare_string(binary[i] ,binary[j] )
if k is False:
__A : List[str] = """*"""
__A : Any = """*"""
temp.append("""X""" )
for i in range(len(a__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a__ ) == 0:
return pi
__A : Optional[Any] = list(set(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : Sequence[float] ) -> list[str]:
__A : List[str] = []
for minterm in minterms:
__A : List[Any] = """"""
for _ in range(a__ ):
__A : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(a__ )
return temp
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : int ) -> bool:
__A : Optional[Any] = list(a__ )
__A : Tuple = list(a__ )
__A : Any = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __SCREAMING_SNAKE_CASE ( a__ : list[list[int]] ,a__ : list[str] ) -> list[str]:
__A : Optional[int] = []
__A : Tuple = [0] * len(a__ )
for i in range(len(chart[0] ) ):
__A : str = 0
__A : Any = -1
for j in range(len(a__ ) ):
if chart[j][i] == 1:
count += 1
__A : Optional[Any] = j
if count == 1:
__A : int = 1
for i in range(len(a__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a__ ) ):
__A : List[str] = 0
temp.append(prime_implicants[i] )
while True:
__A : Optional[Any] = 0
__A : Any = -1
__A : int = 0
for i in range(len(a__ ) ):
__A : List[Any] = chart[i].count(1 )
if count_n > max_n:
__A : Dict = count_n
__A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a__ ) ):
__A : Union[str, Any] = 0
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ,a__ : list[str] ) -> list[list[int]]:
__A : Any = [[0 for x in range(len(a__ ) )] for x in range(len(a__ ) )]
for i in range(len(a__ ) ):
__A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(a__ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,a__ ):
__A : Union[str, Any] = 1
return chart
def __SCREAMING_SNAKE_CASE ( ) -> None:
__A : Any = int(input("""Enter the no. of variables\n""" ) )
__A : List[str] = [
float(a__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__A : Dict = decimal_to_binary(a__ ,a__ )
__A : Union[str, Any] = check(a__ )
print("""Prime Implicants are:""" )
print(a__ )
__A : Optional[Any] = prime_implicant_chart(a__ ,a__ )
__A : Any = selection(a__ ,a__ )
print("""Essential Prime Implicants are:""" )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
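
Two helpers above were broken by the identifier rewrite: compare_string declares both parameters as a__ (duplicate argument names are a SyntaxError in Python), and its element-wise test degenerated to lista[i] != lista[i], which is always false. De-obfuscated sketches of those two helpers, with names chosen by me:

from __future__ import annotations

def compare_string(string1: str, string2: str) -> str | bool:
    # Merge two minterms differing in exactly one bit, e.g. '0010' and '0110' -> '0_10';
    # return False when they differ in more than one position.
    result = list(string1)
    count = 0
    for i, (a, b) in enumerate(zip(string1, string2)):
        if a != b:
            count += 1
            result[i] = "_"
    if count > 1:
        return False
    return "".join(result)

def decimal_to_binary(no_of_variables: int, minterms: list[float]) -> list[str]:
    # Fixed-width binary strings per minterm: decimal_to_binary(3, [1, 5]) -> ['001', '101']
    return [format(int(m), f"0{no_of_variables}b") for m in minterms]
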
| 17 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : Dict = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : Tuple = '''mask2former'''
_lowercase : Optional[Any] = ['''swin''']
_lowercase : Dict = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : Optional[Any] , __A : Optional[Dict] = None , __A : int = 256 , __A : int = 256 , __A : int = 256 , __A : int = 1024 , __A : str = "relu" , __A : int = 6 , __A : int = 10 , __A : int = 8 , __A : float = 0.0 , __A : int = 2048 , __A : bool = False , __A : bool = False , __A : int = 4 , __A : int = 255 , __A : int = 100 , __A : float = 0.1 , __A : float = 2.0 , __A : float = 5.0 , __A : float = 5.0 , __A : int = 1_2544 , __A : float = 3.0 , __A : float = 0.7_5 , __A : float = 0.0_2 , __A : float = 1.0 , __A : bool = True , __A : List[int] = [4, 8, 16, 32] , __A : bool = None , **__A : Dict , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
__A : List[Any] = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__A , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__A , __A ):
__A : Any = backbone_config.pop("""model_type""" )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : str = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
__A : int = backbone_config
__A : Optional[Any] = feature_size
__A : Union[str, Any] = mask_feature_size
__A : List[str] = hidden_dim
__A : Union[str, Any] = encoder_feedforward_dim
__A : int = activation_function
__A : Any = encoder_layers
__A : str = decoder_layers
__A : List[str] = num_attention_heads
__A : Tuple = dropout
__A : Tuple = dim_feedforward
__A : Optional[int] = pre_norm
__A : Optional[Any] = enforce_input_projection
__A : Any = common_stride
__A : Any = ignore_value
__A : List[str] = num_queries
__A : List[str] = no_object_weight
__A : str = class_weight
__A : Any = mask_weight
__A : Dict = dice_weight
__A : Optional[Any] = train_num_points
__A : str = oversample_ratio
__A : str = importance_sample_ratio
__A : List[str] = init_std
__A : Any = init_xavier_std
__A : Any = use_auxiliary_loss
__A : Optional[int] = feature_strides
__A : List[str] = output_auxiliary_logits
__A : List[str] = decoder_layers
super().__init__(**__A )
@classmethod
def lowerCAmelCase_ ( cls : str , __A : PretrainedConfig , **__A : int ):
return cls(
backbone_config=__A , **__A , )
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Optional[int] = copy.deepcopy(self.__dict__ )
__A : Any = self.backbone_config.to_dict()
__A : Optional[int] = self.__class__.model_type
return output
| 17 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : Dict ,a__ : Union[str, Any] ,a__ : Any ) -> Optional[int]: # noqa: E741
while r - l > 1:
__A : Any = (l + r) // 2
if v[m] >= key:
__A : Optional[int] = m
else:
__A : List[Any] = m # noqa: E741
return r
def __SCREAMING_SNAKE_CASE ( a__ : list[int] ) -> int:
if len(a__ ) == 0:
return 0
__A : str = [0] * len(a__ )
__A : List[str] = 1
__A : List[Any] = v[0]
for i in range(1 ,len(a__ ) ):
if v[i] < tail[0]:
__A : int = v[i]
elif v[i] > tail[length - 1]:
__A : Union[str, Any] = v[i]
length += 1
else:
__A : Any = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
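
In the loop above, every tails update (tail[0] = v[i], tail[length] = v[i], tail[CeilIndex(...)] = v[i]) was reduced to a bare placeholder assignment, so the binary-search helper defined at the top is never consulted. An equivalent O(n log n) sketch using bisect (naming mine):

from bisect import bisect_left

def longest_increasing_subsequence_length(v: list[int]) -> int:
    tails: list[int] = []          # tails[k] = smallest tail of an increasing run of length k + 1
    for x in v:
        i = bisect_left(tails, x)  # leftmost run whose tail is >= x
        if i == len(tails):
            tails.append(x)        # x extends the longest run seen so far
        else:
            tails[i] = x           # x lowers the tail of a run of length i + 1
    return len(tails)

assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
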
| 17 | 1 |
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
__A : Optional[int] = sorted(string.lower() )
return len(a__ ) == len(set(a__ ) )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('''Enter a string ''').strip()
UpperCAmelCase_ : Tuple = is_isogram(input_str)
print(f"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 17 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 1 |
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> bool:
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
UpperCAmelCase_ : Optional[int] = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 17 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class lowerCamelCase_ ( _lowercase ):
def __lt__( self : str , __A : Tuple ):
return self[-1] < other[-1]
def __eq__( self : Union[str, Any] , __A : Tuple ):
return self[-1] == other[-1]
def __SCREAMING_SNAKE_CASE ( a__ : list ) -> list:
__A : list[Stack] = []
# sort into stacks
for element in collection:
__A : str = Stack([element] )
__A : List[str] = bisect_left(a__ ,a__ )
if i != len(a__ ):
stacks[i].append(a__ )
else:
stacks.append(a__ )
# use a heap-based merge to merge stack efficiently
__A : Optional[Any] = merge(*(reversed(a__ ) for stack in stacks) )
return collection
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase_ : str = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
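
In the original, Stack subclasses list; the base class was replaced by the undefined placeholder _lowercase above, so the snippet cannot run as printed. A self-contained sketch (the test case is mine):

from bisect import bisect_left
from functools import total_ordering
from heapq import merge

@total_ordering
class Stack(list):                 # a pile, compared by its top (last) element
    def __lt__(self, other):
        return self[-1] < other[-1]
    def __eq__(self, other):
        return self[-1] == other[-1]

def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)     # leftmost pile whose top is >= element
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # Every pile is non-increasing, so reverse each one and k-way merge them.
    return list(merge(*(reversed(stack) for stack in stacks)))

assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
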
| 17 |
import fire
from utils import calculate_rouge, save_json
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : Tuple ,a__ : Any=None ,**a__ : Dict ) -> Optional[Any]:
__A : int = [x.strip() for x in open(a__ ).readlines()]
__A : List[str] = [x.strip() for x in open(a__ ).readlines()][: len(a__ )]
__A : List[Any] = calculate_rouge(a__ ,a__ ,**a__ )
if save_path is not None:
save_json(a__ ,a__ ,indent=a__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 1 |
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, Any] ) -> int:
stooge(a__ ,0 ,len(a__ ) - 1 )
return arr
def __SCREAMING_SNAKE_CASE ( a__ : Optional[int] ,a__ : Tuple ,a__ : str ) -> Any:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
__A , __A : Optional[int] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
__A : List[Any] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(a__ ,a__ ,(h - t) )
# Recursively sort last 2/3 elements
stooge(a__ ,i + t ,(a__) )
# Recursively sort first 2/3 elements
stooge(a__ ,a__ ,(h - t) )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase_ : Any = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
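
In the middle recursive call above, the upper bound h collapsed into the placeholder (a__), so the "last 2/3" pass sorts the wrong range. A corrected, self-contained sketch (the nested helper is my restructuring of the two-function original):

def stooge_sort(arr: list) -> list:
    def stooge(i: int, h: int) -> None:
        if i >= h:
            return
        if arr[i] > arr[h]:            # put the smaller endpoint first
            arr[i], arr[h] = arr[h], arr[i]
        if h - i + 1 > 2:
            t = (h - i + 1) // 3
            stooge(i, h - t)           # first two thirds
            stooge(i + t, h)           # last two thirds
            stooge(i, h - t)           # first two thirds again
    stooge(0, len(arr) - 1)
    return arr

assert stooge_sort([18, 2, 8, 1, 5]) == [1, 2, 5, 8, 18]
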
| 17 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : Union[str, Any] ,a__ : Optional[int] ) -> List[Any]:
# Initialise PyTorch model
__A : Dict = MobileBertConfig.from_json_file(a__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__A : Tuple = MobileBertForPreTraining(a__ )
# Load weights from tf checkpoint
__A : Dict = load_tf_weights_in_mobilebert(a__ ,a__ ,a__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,a__ )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 1 |
UpperCAmelCase_ : str = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, Any] ,a__ : str ,a__ : Any ,a__ : List[str] ) -> str:
    # Standard BFS from s: return True when an augmenting path to t exists.
__A : str = [False] * len(a__ )
__A : Tuple = [s]
__A : Any = True
while queue:
__A : List[str] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(a__ )
__A : Tuple = True
__A : Any = u
return visited[t]
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : int ,a__ : Any ) -> int:
__A : Optional[Any] = [-1] * (len(a__ ))
__A : List[Any] = 0
__A : Optional[Any] = []
__A : int = [i[:] for i in graph] # Record original cut, copy.
while bfs(a__ ,a__ ,a__ ,a__ ):
__A : int = float("""Inf""" )
__A : Optional[int] = sink
while s != source:
# Find the minimum value in select path
__A : str = min(a__ ,graph[parent[s]][s] )
__A : int = parent[s]
max_flow += path_flow
__A : Union[str, Any] = sink
while v != source:
__A : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__A : Any = parent[v]
for i in range(len(a__ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
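
The row is Edmonds-Karp: BFS finds shortest augmenting paths, and the min-cut edges are read off afterwards by diffing the saturated residual graph against the original capacities. A cleaned-up sketch of the max-flow core (naming mine); 23 is the textbook maximum flow of this classic six-node network:

def edmonds_karp(capacity: list[list[int]], source: int, sink: int) -> int:
    graph = [row[:] for row in capacity]   # residual capacities; keep the input intact
    n, max_flow = len(graph), 0
    while True:
        # BFS for the shortest augmenting path in the residual graph.
        parent = [-1] * n
        parent[source] = source
        queue = [source]
        while queue and parent[sink] == -1:
            u = queue.pop(0)
            for v in range(n):
                if parent[v] == -1 and graph[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:             # residual graph exhausted: flow is maximal
            return max_flow
        # Bottleneck along the path, then update forward and backward residuals.
        path_flow, v = float("inf"), sink
        while v != source:
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = u
        max_flow += path_flow

CLRS_GRAPH = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(edmonds_karp(CLRS_GRAPH, 0, 5))  # 23
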
| 17 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 |
import math
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : List[str]=0 ): # a graph with Node 0,1,...,N-1
__A : List[str] = n
__A : List[str] = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # adjacency matrix for weight
__A : str = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__A : List[Any] = w
def lowerCAmelCase_ ( self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__A : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[str] ):
return self.dp[u][v]
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
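
add_edge above writes the weight into a throwaway local instead of self.dp, so the matrix stays at infinity and the demo cannot print distances. A functional sketch over the same edge list (I also zero the diagonal, which the row omits); expected output: 11 for 1 -> 4 (via 3) and 16 for 0 -> 3 (via 2):

import math

def floyd_warshall(n: int, edges: list[tuple[int, int, float]]) -> list[list[float]]:
    dist = [[math.inf] * n for _ in range(n)]
    for i in range(n):
        dist[i][i] = 0.0
    for u, v, w in edges:
        dist[u][v] = min(dist[u][v], w)        # keep the lightest parallel edge
    for k in range(n):                         # allow k as an intermediate vertex
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist

edges = [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10), (3, 1, 2),
         (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]
dist = floyd_warshall(5, edges)
print(dist[1][4], dist[0][3])  # 11 16
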
| 17 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''Wav2Vec2FeatureExtractor'''
_lowercase : Any = '''AutoTokenizer'''
def __init__( self : List[str] , __A : List[Any] , __A : Optional[Any] ):
super().__init__(__A , __A )
__A : Optional[Any] = self.feature_extractor
__A : str = False
@classmethod
def lowerCAmelCase_ ( cls : Dict , __A : List[str] , **__A : Optional[Any] ):
try:
return super().from_pretrained(__A , **__A )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , __A , )
__A : Any = WavaVecaFeatureExtractor.from_pretrained(__A , **__A )
__A : List[str] = WavaVecaCTCTokenizer.from_pretrained(__A , **__A )
return cls(feature_extractor=__A , tokenizer=__A )
def __call__( self : Optional[int] , *__A : Optional[Any] , **__A : List[str] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__A : Any = kwargs.pop("""raw_speech""" )
else:
__A : List[str] = kwargs.pop("""audio""" , __A )
__A : int = kwargs.pop("""sampling_rate""" , __A )
__A : Tuple = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : Union[str, Any] = args[0]
__A : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__A : Tuple = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if text is not None:
__A : Any = self.tokenizer(__A , **__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__A : List[str] = encodings["""input_ids"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__A , **__A )
__A : Any = kwargs.pop("""input_features""" , __A )
__A : Tuple = kwargs.pop("""labels""" , __A )
if len(__A ) > 0:
__A : Any = args[0]
__A : Union[str, Any] = args[1:]
if input_features is not None:
__A : str = self.feature_extractor.pad(__A , *__A , **__A )
if labels is not None:
__A : Union[str, Any] = self.tokenizer.pad(__A , **__A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__A : Optional[Any] = labels["""input_ids"""]
return input_features
def lowerCAmelCase_ ( self : List[Any] , *__A : List[Any] , **__A : List[str] ):
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Optional[int] ):
return self.tokenizer.decode(*__A , **__A )
@contextmanager
def lowerCAmelCase_ ( self : str ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__A : Dict = True
__A : List[str] = self.tokenizer
yield
__A : Tuple = self.feature_extractor
__A : int = False
| 17 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : complex ,a__ : str = "x" ,a__ : float = 10**-10 ,a__ : int = 1 ,) -> complex:
__A : Tuple = symbols(a__ )
__A : List[str] = lambdify(a__ ,a__ )
__A : Any = lambdify(a__ ,diff(a__ ,a__ ) )
__A : Dict = starting_point
while True:
if diff_function(a__ ) != 0:
__A : Optional[int] = prev_guess - multiplicity * func(a__ ) / diff_function(
a__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__A : List[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 1 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__A : Optional[int] = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
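
A quick sanity check of the table, assuming energy_conversion is the de-obfuscated name of the converter above (it multiplies the value into joules, then divides back out):

print(energy_conversion("kilowatthour", "joule", 1.0))            # 3600000.0
print(energy_conversion("joule", "kilocalorie_nutr", 4_186_800))  # 1.0
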
| 17 |
from math import sqrt
def __SCREAMING_SNAKE_CASE ( a__ : int = 1000000 ) -> int:
__A : int = 0
__A : int = 0
__A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a__ ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 1 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowerCamelCase_ ( _lowercase ):
def __init__( self : int , __A : List[str] , __A : Optional[int]=None , __A : Any=True , __A : List[Any]=None , **__A : List[str] ):
__A : str = parent
__A : str = config_class
__A : List[str] = has_text_modality
__A : int = kwargs
__A : str = common_properties
def lowerCAmelCase_ ( self : List[Any] ):
__A : List[Any] = self.config_class(**self.inputs_dict )
__A : List[Any] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(__A , __A ) , msg=F"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(__A ):
try:
setattr(__A , __A , __A )
self.parent.assertEqual(
getattr(__A , __A ) , __A , msg=F"""`{name} value {idx} expected, but was {getattr(__A , __A )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(__A ):
try:
__A : Dict = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(__A , __A ) , __A , msg=F"""`{name} value {idx} expected, but was {getattr(__A , __A )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def lowerCAmelCase_ ( self : Optional[int] ):
__A : List[str] = self.config_class(**self.inputs_dict )
__A : Optional[Any] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , __A )
def lowerCAmelCase_ ( self : List[str] ):
__A : List[str] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__A : List[Any] = os.path.join(__A , """config.json""" )
config_first.to_json_file(__A )
__A : List[str] = self.config_class.from_json_file(__A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCAmelCase_ ( self : List[Any] ):
__A : Tuple = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(__A )
__A : str = self.config_class.from_pretrained(__A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : str = self.config_class(**self.inputs_dict )
__A : Dict = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
__A : Union[str, Any] = os.path.join(__A , __A )
config_first.save_pretrained(__A )
__A : Dict = self.config_class.from_pretrained(__A , subfolder=__A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Optional[Any] = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
__A : str = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self.config_class.is_composition:
return
__A : List[str] = self.config_class()
self.parent.assertIsNotNone(__A )
def lowerCAmelCase_ ( self : Any ):
__A : Optional[Any] = copy.deepcopy(__A )
__A : Optional[int] = self.config_class(**__A )
__A : List[str] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(__A , __A ) != value:
wrong_values.append((key, getattr(__A , __A ), value) )
if len(__A ) > 0:
__A : Dict = """\n""".join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" )
def lowerCAmelCase_ ( self : Dict ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 17 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 1 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> int:
__A : Optional[int] = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
__A : List[Any] = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> tuple[int, int]:
__A : Any = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
__A : Tuple = corpus_without_punctuation.split("""\n""" )
__A : Tuple = term.lower()
return (len([doc for doc in docs if term in doc] ), len(a__ ))
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : int ,a__ : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) ,3 )
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : int ) -> float:
return round(tf * idf ,3 )
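
A worked pass over a three-document corpus, assuming the original names term_frequency, document_frequency(term, corpus), inverse_document_frequency(df, n) and tf_idf(tf, idf) for the four placeholders above. Note document_frequency matches by substring containment ("cat" would also hit "catalog"):

corpus = "the cat sat\nthe cat ran\nthe dog sat"
print(term_frequency("cat", "the cat sat on the cat mat"))  # 2 token matches
print(document_frequency("cat", corpus))                    # (2, 3): 2 of 3 documents
print(inverse_document_frequency(2, 3))                     # 0.176 = round(log10(3 / 2), 3)
print(tf_idf(2, 0.176))                                     # 0.352
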
| 17 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : Tuple , __A : Optional[int] , __A : int ):
__A : List[str] = name
__A : Optional[int] = value
__A : Optional[Any] = weight
def __repr__( self : Any ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.value
def lowerCAmelCase_ ( self : str ):
return self.name
def lowerCAmelCase_ ( self : str ):
return self.weight
def lowerCAmelCase_ ( self : Dict ):
return self.value / self.weight
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Optional[int] ,a__ : Union[str, Any] ) -> int:
__A : Tuple = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ) -> Tuple:
__A : Optional[int] = sorted(a__ ,key=a__ ,reverse=a__ )
__A : Optional[Any] = []
__A , __A : Tuple = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
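
greedy() above sorts by a caller-supplied key (collapsed into a bare a__ here) and takes items while the weight budget allows; it is a value/density heuristic, not an optimal 0/1 knapsack. A self-contained sketch with my own names and plain tuples:

def greedy_knapsack(items: list[tuple[str, float, float]], max_cost: float,
                    key=lambda item: item[1]) -> tuple[list[str], float]:
    # Each item is (name, value, weight); the default key greedily prefers high value.
    chosen, total_value, total_cost = [], 0.0, 0.0
    for name, value, weight in sorted(items, key=key, reverse=True):
        if total_cost + weight <= max_cost:
            chosen.append(name)
            total_cost += weight
            total_value += value
    return chosen, total_value

menu = [("Burger", 80, 40), ("Pizza", 100, 60), ("Coca Cola", 60, 40),
        ("Rice", 70, 70), ("Sambhar", 50, 100), ("Chicken", 110, 85),
        ("Fries", 90, 55), ("Milk", 60, 70)]
print(greedy_knapsack(menu, 500)[1])  # 570.0 when greedy by value
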
| 17 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Union[str, Any] = GPTSwaTokenizer
_lowercase : Dict = False
_lowercase : Tuple = True
_lowercase : Union[str, Any] = False
def lowerCAmelCase_ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
__A : Tuple = GPTSwaTokenizer(__A , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Tuple , __A : int ):
__A : Optional[Any] = """This is a test"""
__A : str = """This is a test"""
return input_text, output_text
def lowerCAmelCase_ ( self : Optional[int] ):
__A : List[str] = """<s>"""
__A : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def lowerCAmelCase_ ( self : str ):
__A : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__A ) , 2000 )
def lowerCAmelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def lowerCAmelCase_ ( self : List[Any] ):
__A : Optional[int] = GPTSwaTokenizer(__A )
__A : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [465, 287, 265, 631, 842] )
__A : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
__A , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
__A : Optional[int] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
__A : Union[str, Any] = tokenizer.convert_ids_to_tokens(__A )
# fmt: off
self.assertListEqual(
__A , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def lowerCAmelCase_ ( self : List[Any] ):
__A : Union[str, Any] = GPTSwaTokenizer(__A )
__A : Optional[int] = ["""This is a test""", """I was born in 92000, and this is falsé."""]
__A : Tuple = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__A , __A ):
self.assertListEqual(tokenizer.encode_fast(__A ) , __A )
# Test that decode_fast returns the input text
for text, token_ids in zip(__A , __A ):
self.assertEqual(tokenizer.decode_fast(__A ) , __A )
@slow
def lowerCAmelCase_ ( self : Tuple ):
__A : Any = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
__A : Any = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        # __A still holds the expected-encoding dict assigned above
        self.tokenizer_integration_test_util(
            expected_encoding=__A, model_name="""AI-Sweden/gpt-sw3-126m""", sequences=texts, )
| 17 |
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
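# A minimal usage sketch (hypothetical call, assuming the corrected signature
# above): converting 1_000 joule to kilojoule multiplies by 1.0 and divides by
# 1_000, so energy_conversion("joule", "kilojoule", 1_000) returns 1.0, and
# energy_conversion("kilowatthour", "joule", 1) returns 3_600_000.0.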
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("""CUDA out of memory.""")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
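    # Note on the trace asserted above: find_executable_batch_size halves the
    # batch size after every simulated OOM, so starting from 128 the decorated
    # function is retried with 128 -> 64 -> 32 -> 16 -> 8 before it succeeds.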
    def test_memory_explicit_with_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs, arga = mock_training_loop_function("""hello""")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arga], [8, """hello"""])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arga, argb):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, """hello""", """world""")
        self.assertIn("""Batch size was passed into `f`""", cm.exception.args[0])
        self.assertIn("""`f(arg1='hello', arg2='world')""", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("""Oops, we had an error!""")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("""Oops, we had an error!""", cm.exception.args[0])
@require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
| 17 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""")
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""")
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""")

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            """CompVis/stable-diffusion-v1-4""", revision="""onnx""", safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = """A red cat sitting on a park bench"""

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75,
            guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="""np""", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 17 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
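# Sketch of the generated parameters (values taken from DATASETS_ON_HF_GCP above):
# with_config=True yields entries such as
#   {"testcase_name": "wikipedia/20220301.en", "dataset": "wikipedia", "config_name": "20220301.en"}
# while with_config=False deduplicates to one {"testcase_name": ..., "dataset": ...} per dataset name.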
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None
    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            dataset_info_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, """/"""),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""") / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir, config_name="""20220301.frr""", hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path, config_name="""20220301.frr""", hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["""train"""], IterableDataset)
assert next(iter(ds["""train"""] ) )
| 17 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This linear search runs once the remaining search space is small.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
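# A doctest-style sanity check (a sketch, not part of the original module):
# on the sorted list [1, 3, 5, 7, 9] both variants fall back to lin_search,
# because the range is below `precision`, and both return index 3 for target 7:
#   ite_ternary_search([1, 3, 5, 7, 9], 7) == 3
#   rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 7) == 3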
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"""Iterative search: {target} found at position: {result1}""")
        print(f"""Recursive search: {target} found at position: {result2}""")
    else:
        print('''Not found''')
| 17 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
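    # Sketch of the aspect-preserving resize above: with shortest_edge == 18, a
    # 30 (h) x 60 (w) input maps to expected_height == 18 and
    # expected_width == int(18 * 60 / 30) == 36.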
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """do_rescale"""))
        self.assertTrue(hasattr(image_processing, """do_pad"""))
        self.assertTrue(hasattr(image_processing, """size"""))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 1333})
        self.assertEqual(image_processor.do_pad, True)
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 1 |
def _print_dist(dist, v):
    print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("""inf"""):
                print(int(dist[i][j]), end="""\t""")
            else:
                print("""INF""", end="""\t""")
        print()


def floyd_warshall(graph, v):
    dist = [[float("""inf""") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("""inf""")
                    and dist[k][j] != float("""inf""")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
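# Minimal sketch: for a 3-vertex graph where 0 -> 1 costs 1 and 1 -> 2 costs 2,
#   INF = float("inf")
#   dist, _ = floyd_warshall([[0, 1, INF], [INF, 0, 2], [INF, INF, 0]], 3)
# the relaxation through k == 1 sets dist[0][2] to 3.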
if __name__ == "__main__":
    v = int(input('''Enter number of vertices: '''))
    e = int(input('''Enter number of edges: '''))

    graph = [[float('''inf''') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('''\nEdge ''', i + 1)
        src = int(input('''Enter source:'''))
        dst = int(input('''Enter destination:'''))
        weight = float(input('''Enter weight:'''))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 17 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )

    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")

    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ), )

    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER)
return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
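# Sketch invocation (the file and script names below are illustrative, assuming
# this launcher is saved as xla_spawn.py next to your training script):
#   python xla_spawn.py --num_cores 8 your_training_script.py --your_args ...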
if __name__ == "__main__":
main()
| 17 | 1 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
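# Sketch check against the Project Euler 86 example: M = 99 gives 1975 cuboids
# and M = 100 gives 2060, so solution(1999) should return 100, the first M whose
# cumulative count exceeds 1999, assuming the loop above.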
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
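# Worked example matching the values below: for poly = (0.0, 0.0, 5.0, 9.3, 7.0)
# and x = 10.0, both evaluators compute
#   5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 500 + 9300 + 70000 = 79800.0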
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 17 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """do_rescale"""))
        self.assertTrue(hasattr(image_processing, """do_pad"""))
        self.assertTrue(hasattr(image_processing, """size"""))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 1333})
        self.assertEqual(image_processor.do_pad, True)
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = """EncodecFeatureExtractor"""
    tokenizer_class = ("""T5Tokenizer""", """T5TokenizerFast""")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("""audio""", None)
        sampling_rate = kwargs.pop("""sampling_rate""", None)
        text = kwargs.pop("""text""", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["""input_values"""] = audio_inputs["""input_values"""]
            if "padding_mask" in audio_inputs:
                inputs["""padding_mask"""] = audio_inputs["""padding_mask"""]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("""audio""", None)
        padding_mask = kwargs.pop("""padding_mask""", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), """constant""", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
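    # Sketch of the padding trick above: if the model generated seq_len == 6
    # samples but the padding mask covers only 4, the mask is extended with the
    # *non-padding* value, so the 2 extra generated samples survive the slice.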
| 17 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Any = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Input value must be an 'int' type""")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
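# Sketch: 16 == 0b10000 needs five right-shifts to reach zero, so the function
# returns 5, the same value as Python's built-in (16).bit_length().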
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["""RANK"""])
    world_size = int(os.environ["""WORLD_SIZE"""])

    parser = ArgumentParser()
    parser.add_argument("""--streaming""", type=bool)
    parser.add_argument("""--local_rank""", type=int)
    parser.add_argument("""--num_workers""", type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"""shards""": [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""")
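    # Sketch of the expected split: with NUM_SHARDS * NUM_ITEMS_PER_SHARD == 12
    # examples and world_size == 3, every rank sees 12 // 3 == 4 examples; when
    # the total is not divisible, the lower ranks each take one extra example.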
if __name__ == "__main__":
main()
| 17 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
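# Doctest-style checks (a sketch): solution(1) == 3 since any single day is
# valid, and solution(2) == 8 since only "AA" (two absences) is excluded from
# the 9 possible two-day strings.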
if __name__ == "__main__":
print(solution())
| 17 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("""swish""")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("""silu""")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("""mish""")
        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("""gelu""")
        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 17 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual flow computation
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index], )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
UpperCAmelCase_ : Any = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> str:
__A : List[Any] = EfficientNetConfig()
__A : Union[str, Any] = CONFIG_MAP[model_name]["""hidden_dim"""]
__A : str = CONFIG_MAP[model_name]["""width_coef"""]
__A : List[Any] = CONFIG_MAP[model_name]["""depth_coef"""]
__A : Union[str, Any] = CONFIG_MAP[model_name]["""image_size"""]
__A : Tuple = CONFIG_MAP[model_name]["""dropout_rate"""]
__A : Optional[Any] = CONFIG_MAP[model_name]["""dw_padding"""]
__A : Optional[int] = """huggingface/label-files"""
__A : List[Any] = """imagenet-1k-id2label.json"""
__A : str = 1000
__A : Optional[Any] = json.load(open(hf_hub_download(a__ ,a__ ,repo_type="""dataset""" ) ,"""r""" ) )
__A : Optional[int] = {int(a__ ): v for k, v in idalabel.items()}
__A : Any = idalabel
__A : Any = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> int:
__A : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__A : int = Image.open(requests.get(a__ ,stream=a__ ).raw )
return im
def __SCREAMING_SNAKE_CASE ( a__ : Any ) -> Optional[int]:
__A : Any = CONFIG_MAP[model_name]["""image_size"""]
__A : Any = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] ,do_center_crop=a__ ,)
return preprocessor
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> str:
__A : Optional[int] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
__A : Union[str, Any] = sorted(set(a__ ) )
__A : Union[str, Any] = len(a__ )
__A : Tuple = {b: str(a__ ) for b, i in zip(a__ ,range(a__ ) )}
__A : Tuple = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__A : Any = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
__A : Dict = {}
for item in rename_keys:
if item[0] in original_param_names:
__A : Optional[Any] = """efficientnet.""" + item[1]
__A : Dict = """classifier.weight"""
__A : List[str] = """classifier.bias"""
return key_mapping
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : str ,a__ : Any ) -> Optional[int]:
for key, value in tf_params.items():
if "normalization" in key:
continue
__A : Any = key_mapping[key]
if "_conv" in key and "kernel" in key:
__A : Dict = torch.from_numpy(a__ ).permute(3 ,2 ,0 ,1 )
elif "depthwise_kernel" in key:
__A : List[Any] = torch.from_numpy(a__ ).permute(2 ,3 ,0 ,1 )
elif "kernel" in key:
__A : Tuple = torch.from_numpy(np.transpose(a__ ) )
else:
__A : List[Any] = torch.from_numpy(a__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(a__ )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : Dict ,a__ : Any ,a__ : Dict ) -> Tuple:
__A : Union[str, Any] = model_classes[model_name](
include_top=a__ ,weights="""imagenet""" ,input_tensor=a__ ,input_shape=a__ ,pooling=a__ ,classes=1000 ,classifier_activation="""softmax""" ,)
__A : str = original_model.trainable_variables
__A : Tuple = original_model.non_trainable_variables
__A : List[str] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
__A : List[Any] = param.numpy()
__A : Union[str, Any] = list(tf_params.keys() )
# Load HuggingFace model
__A : str = get_efficientnet_config(a__ )
__A : int = EfficientNetForImageClassification(a__ ).eval()
__A : Optional[Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
__A : Dict = rename_keys(a__ )
replace_params(a__ ,a__ ,a__ )
# Initialize preprocessor and preprocess input image
__A : Tuple = convert_image_processor(a__ )
__A : Union[str, Any] = preprocessor(images=prepare_img() ,return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
__A : Union[str, Any] = hf_model(**a__ )
__A : str = outputs.logits.detach().numpy()
# Original model inference
__A : Union[str, Any] = False
__A : List[str] = CONFIG_MAP[model_name]["""image_size"""]
__A : Optional[Any] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
__A : Dict = image.img_to_array(a__ )
__A : int = np.expand_dims(a__ ,axis=0 )
__A : List[str] = original_model.predict(a__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(a__ ,a__ ,atol=1E-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(a__ ):
os.mkdir(a__ )
# Save converted model and image processor
hf_model.save_pretrained(a__ )
preprocessor.save_pretrained(a__ )
if push_to_hub:
# Push model and image processor to hub
print(f"""Pushing converted {model_name} to the hub...""" )
__A : List[str] = f"""efficientnet-{model_name}"""
preprocessor.push_to_hub(a__ )
hf_model.push_to_hub(a__ )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
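# --- Illustrative usage (added for clarity; not part of the original row) ---
# The script is driven entirely by argparse, so a typical invocation would be
# (the script filename is a placeholder, not something the row specifies):
#
#   python convert_efficientnet_to_pytorch.py \
#       --model_name b0 \
#       --pytorch_dump_folder_path hf_model \
#       --save_model
#
# Both TensorFlow/Keras and transformers must be installed, since the original
# Keras model and the converted HF model are run side by side for verification.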
| 17 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str | Literal[False]:
__A : Tuple = list(a__ )
__A : Optional[int] = list(a__ )
__A : int = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count += 1
__A : int = """_"""
if count > 1:
return False
else:
return "".join(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ) -> list[str]:
__A : Optional[Any] = []
while True:
__A : Tuple = ["""$"""] * len(a__ )
__A : Union[str, Any] = []
for i in range(len(a__ ) ):
for j in range(i + 1 ,len(a__ ) ):
__A : int = compare_string(binary[i] ,binary[j] )
if k is False:
__A : List[str] = """*"""
__A : Any = """*"""
temp.append("""X""" )
for i in range(len(a__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a__ ) == 0:
return pi
__A : Optional[Any] = list(set(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : Sequence[float] ) -> list[str]:
__A : List[str] = []
for minterm in minterms:
__A : List[Any] = """"""
for _ in range(a__ ):
__A : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(a__ )
return temp
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : int ) -> bool:
__A : Optional[Any] = list(a__ )
__A : Tuple = list(a__ )
__A : Any = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __SCREAMING_SNAKE_CASE ( a__ : list[list[int]] ,a__ : list[str] ) -> list[str]:
__A : Optional[int] = []
__A : Tuple = [0] * len(a__ )
for i in range(len(chart[0] ) ):
__A : str = 0
__A : Any = -1
for j in range(len(a__ ) ):
if chart[j][i] == 1:
count += 1
__A : Optional[Any] = j
if count == 1:
__A : int = 1
for i in range(len(a__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a__ ) ):
__A : List[str] = 0
temp.append(prime_implicants[i] )
while True:
__A : Optional[Any] = 0
__A : Any = -1
__A : int = 0
for i in range(len(a__ ) ):
__A : List[Any] = chart[i].count(1 )
if count_n > max_n:
__A : Dict = count_n
__A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a__ ) ):
__A : Union[str, Any] = 0
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ,a__ : list[str] ) -> list[list[int]]:
__A : Any = [[0 for x in range(len(a__ ) )] for x in range(len(a__ ) )]
for i in range(len(a__ ) ):
__A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(a__ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,a__ ):
__A : Union[str, Any] = 1
return chart
def __SCREAMING_SNAKE_CASE ( ) -> None:
__A : Any = int(input("""Enter the no. of variables\n""" ) )
__A : List[str] = [
float(a__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__A : Dict = decimal_to_binary(a__ ,a__ )
__A : Union[str, Any] = check(a__ )
print("""Prime Implicants are:""" )
print(a__ )
__A : Optional[Any] = prime_implicant_chart(a__ ,a__ )
__A : Any = selection(a__ ,a__ )
print("""Essential Prime Implicants are:""" )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
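# --- Illustrative sketch (added for clarity; not part of the original row) ---
# A readable reference for the single-bit merge step of Quine-McCluskey that
# compare_string above implements: two implicants combine iff they differ in
# exactly one position, and the differing bit is replaced by '_'. Names below
# are my own.
from typing import Optional

def merge_implicants(a: str, b: str) -> Optional[str]:
    diff_positions = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff_positions) != 1:
        return None  # zero or more than one differing bit: no merge
    i = diff_positions[0]
    return a[:i] + "_" + a[i + 1 :]

# e.g. merge_implicants("1101", "1001") == "1_01"
#      merge_implicants("1101", "1010") is None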
| 17 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class lowerCamelCase_ ( _lowercase ):
@add_start_docstrings(__A )
def __call__( self : str , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : Optional[Any] ):
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Union[str, Any] , __A : int , __A : Optional[int] = None ):
__A : Optional[int] = max_length
__A : Optional[int] = max_position_embeddings
@add_start_docstrings(__A )
def __call__( self : Union[str, Any] , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : Optional[int] ):
__A : Optional[Any] = input_ids.shape[-1]
__A : Union[str, Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[str] , __A : int , __A : int ):
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"""with `max_length = start_length + max_new_tokens` instead.""" , __A , )
__A : Dict = start_length
__A : Optional[int] = max_new_tokens
__A : Tuple = start_length + max_new_tokens
@add_start_docstrings(__A )
def __call__( self : Tuple , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : str ):
return input_ids.shape[-1] >= self.max_length
class lowerCamelCase_ ( _lowercase ):
def __init__( self : int , __A : float , __A : Optional[float] = None ):
__A : Optional[int] = max_time
__A : int = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__A )
def __call__( self : int , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : Optional[int] ):
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase_ ( _lowercase ):
@add_start_docstrings(__A )
def __call__( self : List[str] , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : List[str] ):
return any(criteria(__A , __A ) for criteria in self )
@property
def lowerCAmelCase_ ( self : int ):
for stopping_criterium in self:
if isinstance(__A , __A ):
return stopping_criterium.max_length
elif isinstance(__A , __A ):
return stopping_criterium.max_length
return None
def __SCREAMING_SNAKE_CASE ( a__ : StoppingCriteriaList ,a__ : int ) -> StoppingCriteriaList:
__A : int = stopping_criteria.max_length
__A : Optional[int] = deepcopy(a__ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" ,a__ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=a__ ) )
return new_stopping_criteria
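# --- Illustrative usage (added for clarity; not part of the original row) ---
# A minimal sketch of how the transformers stopping criteria mirrored by this
# row are combined at generation time. The "gpt2" checkpoint is a placeholder;
# any causal LM works.
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteriaList,
)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Stopping criteria let you", return_tensors="pt")
criteria = StoppingCriteriaList(
    [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
)
# generation halts as soon as any criterion in the list returns True
output_ids = model.generate(**inputs, stopping_criteria=criteria)
print(tokenizer.decode(output_ids[0]))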
| 17 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : Dict ,a__ : Union[str, Any] ,a__ : Any ) -> Optional[int]: # noqa: E741
while r - l > 1:
__A : Any = (l + r) // 2
if v[m] >= key:
__A : Optional[int] = m
else:
__A : List[Any] = m # noqa: E741
return r
def __SCREAMING_SNAKE_CASE ( a__ : list[int] ) -> int:
if len(a__ ) == 0:
return 0
__A : str = [0] * len(a__ )
__A : List[str] = 1
__A : List[Any] = v[0]
for i in range(1 ,len(a__ ) ):
if v[i] < tail[0]:
__A : int = v[i]
elif v[i] > tail[length - 1]:
__A : Union[str, Any] = v[i]
length += 1
else:
__A : Any = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
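# --- Illustrative sketch (added for clarity; not part of the original row) ---
# The same O(n log n) longest-increasing-subsequence idea, written with the
# standard-library bisect module instead of the hand-rolled binary search
# above. Names below are my own.
from bisect import bisect_left

def lis_length(values: list[int]) -> int:
    tails: list[int] = []  # tails[k] = smallest tail of an increasing run of length k + 1
    for x in values:
        pos = bisect_left(tails, x)
        if pos == len(tails):
            tails.append(x)  # x extends the longest run found so far
        else:
            tails[pos] = x  # x gives a smaller tail for runs of length pos + 1
    return len(tails)

# e.g. lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6   (2, 3, 7, 8, 10, 13)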
| 17 | 1 |
import os
from pathlib import Path
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
from torch.utils.cpp_extension import load
__A : int = Path(a__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
__A : Any = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" ,"""ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" ,"""ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" ,a__ ,with_cuda=a__ ,extra_include_paths=[str(a__ )] ,extra_cflags=["""-DWITH_CUDA=1"""] ,extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] ,)
import MultiScaleDeformableAttention as MSDA
return MSDA
| 17 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 1 |
from __future__ import annotations
from random import choice
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> Optional[Any]:
return choice(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : list[int] ,a__ : int ) -> int:
__A : List[str] = random_pivot(a__ )
# partition based on pivot
# linear time
__A : Optional[int] = [e for e in lst if e < pivot]
__A : Tuple = [e for e in lst if e > pivot]
    # if we get lucky, the pivot might be the element we want;
    # the partition splits the list as:
    # small (elements smaller than the pivot)
    # + pivot (the k-th element when len(small) == k - 1)
    # + big (elements larger than the pivot)
if len(a__ ) == k - 1:
return pivot
    # the k-th element is among the elements bigger than the pivot
elif len(a__ ) < k - 1:
return kth_number(a__ ,k - len(a__ ) - 1 )
    # the k-th element is among the elements smaller than the pivot
else:
return kth_number(a__ ,a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
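# --- Illustrative usage (added for clarity; not part of the original row) ---
# A sanity check of kth_number against sorting (k is 1-based; note the row's
# renamed local bindings would need their original names restored to run):
#
#   import random
#   data = random.sample(range(100), 10)   # distinct values
#   assert kth_number(data, 4) == sorted(data)[3]
#
# The small/big partition drops elements equal to the pivot, so the function
# is only reliable for distinct inputs, and its expected O(n) running time
# relies on the random pivot choice.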
| 17 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : Optional[Any] = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 |
import fire
from utils import calculate_rouge, save_json
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : Tuple ,a__ : Any=None ,**a__ : Dict ) -> Optional[Any]:
__A : int = [x.strip() for x in open(a__ ).readlines()]
__A : List[str] = [x.strip() for x in open(a__ ).readlines()][: len(a__ )]
__A : List[Any] = calculate_rouge(a__ ,a__ ,**a__ )
if save_path is not None:
save_json(a__ ,a__ ,indent=a__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
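# --- Illustrative usage (added for clarity; not part of the original row) ---
# fire.Fire exposes calculate_rouge_path directly on the command line, so a
# typical call is (the script and file names are placeholders):
#
#   python rouge_cli.py predictions.txt targets.txt --save_path metrics.json
#
# Positional arguments map to the prediction and reference files; any extra
# keyword arguments are forwarded to calculate_rouge via **kwargs.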
| 17 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCamelCase_ :
def __init__( self : Optional[Any] , __A : List[Any] , ):
__A : List[Any] = parent
__A : Tuple = 13
__A : Any = 7
__A : Any = True
__A : Any = True
__A : Dict = True
__A : int = 99
__A : List[str] = 32
__A : Optional[Any] = 2
__A : Dict = 4
__A : Dict = 37
__A : List[str] = """gelu"""
__A : str = 0.1
__A : List[Any] = 0.1
__A : Dict = 512
__A : str = 16
__A : Union[str, Any] = 2
__A : Optional[int] = 0.0_2
__A : Optional[int] = 3
__A : int = 4
__A : Optional[int] = None
def lowerCAmelCase_ ( self : Optional[int] ):
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : int = None
if self.use_input_mask:
__A : str = random_attention_mask([self.batch_size, self.seq_length] )
__A : Union[str, Any] = None
__A : Dict = None
__A : int = None
if self.use_labels:
__A : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : str = ids_tensor([self.batch_size] , self.num_choices )
__A : str = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str ):
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : int = self.prepare_config_and_inputs()
__A : str = True
__A : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase_ ( self : int , __A : List[Any] , __A : List[str] , __A : Any , __A : List[str] , __A : List[str] , __A : List[Any] ):
__A : Union[str, Any] = TFEsmModel(config=__A )
__A : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__A : Dict = model(__A )
__A : List[str] = [input_ids, input_mask]
__A : List[str] = model(__A )
__A : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : int , __A : str , __A : Any , __A : Optional[Any] , __A : str , __A : int , __A : int , __A : List[str] , __A : str , ):
__A : List[str] = True
__A : Optional[int] = TFEsmModel(config=__A )
__A : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
__A : Optional[int] = model(__A )
__A : List[Any] = [input_ids, input_mask]
__A : int = model(__A , encoder_hidden_states=__A )
# Also check the case where encoder outputs are not passed
__A : Tuple = model(__A , attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Dict , __A : Union[str, Any] , __A : Dict , __A : List[str] , __A : Tuple , __A : int , __A : Tuple ):
__A : Optional[Any] = TFEsmForMaskedLM(config=__A )
__A : Optional[int] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Optional[int] , __A : Dict , __A : Any , __A : Any , __A : str , __A : Dict , __A : Any ):
__A : Tuple = self.num_labels
__A : Any = TFEsmForTokenClassification(config=__A )
__A : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__A : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : List[Any] ):
__A : Tuple = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : str = config_and_inputs
__A : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
_lowercase : List[str] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowercase : List[Any] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowercase : int = False
_lowercase : Tuple = False
def lowerCAmelCase_ ( self : int ):
__A : Union[str, Any] = TFEsmModelTester(self )
__A : List[Any] = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowerCAmelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase_ ( self : Dict ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def lowerCAmelCase_ ( self : Tuple ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def lowerCAmelCase_ ( self : Dict ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Tuple = TFEsmModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def lowerCAmelCase_ ( self : int ):
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
__A , __A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : str = model_class(__A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__A : List[str] = model.get_bias()
assert isinstance(__A , __A )
for k, v in name.items():
assert isinstance(__A , tf.Variable )
else:
__A : Tuple = model.get_output_embeddings()
assert x is None
__A : int = model.get_bias()
assert name is None
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : Any ):
__A : Optional[int] = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
__A : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__A : Optional[int] = model(__A )[0]
__A : Dict = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __A )
# compare the actual values for a slice.
__A : Union[str, Any] = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
__A : str = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
__A : Optional[Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__A : Tuple = model(__A )[0]
# compare the actual values for a slice.
__A : Union[str, Any] = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 17 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : Union[str, Any] ,a__ : Optional[int] ) -> List[Any]:
# Initialise PyTorch model
__A : Dict = MobileBertConfig.from_json_file(a__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__A : Tuple = MobileBertForPreTraining(a__ )
# Load weights from tf checkpoint
__A : Dict = load_tf_weights_in_mobilebert(a__ ,a__ ,a__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,a__ )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 1 |
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : int ,a__ : int ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
__A : int = _modexpt(a__ ,exponent // 2 ,a__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(a__ ,exponent - 1 ,a__ )) % modulo_value
def __SCREAMING_SNAKE_CASE ( a__ : int = 1777 ,a__ : int = 1855 ,a__ : int = 8 ) -> int:
__A : Dict = base
for _ in range(1 ,a__ ):
__A : Optional[Any] = _modexpt(a__ ,a__ ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 1 |
from math import factorial
def __SCREAMING_SNAKE_CASE ( a__ : int = 100 ) -> int:
return sum(map(a__ ,str(factorial(a__ ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
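# --- Illustrative check (added for clarity; not part of the original row) ---
# The map argument above has been renamed away (it is `int` in the original),
# but the intended behaviour is easy to verify by hand: 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so:
#
#   assert solution(10) == 27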
| 17 |
import math
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : List[str]=0 ): # a graph with Node 0,1,...,N-1
__A : List[str] = n
__A : List[str] = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # adjacency matrix for weight
__A : str = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__A : List[Any] = w
def lowerCAmelCase_ ( self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__A : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[str] ):
return self.dp[u][v]
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
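# --- Illustrative sketch (added for clarity; not part of the original row) ---
# A compact functional Floyd-Warshall over an adjacency matrix, for comparison
# with the class-based version above. Entries of math.inf mean "no edge", and
# the diagonal is initialised to 0 so every vertex reaches itself at zero cost.
from math import inf  # callers use inf to mark missing edges

def floyd_warshall_matrix(weights: list[list[float]]) -> list[list[float]]:
    n = len(weights)
    dist = [[0.0 if i == j else weights[i][j] for j in range(n)] for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

# Built from the 5-node edge list above, this should give dist[1][4] == 11
# (via 1 -> 3 -> 4) and dist[0][3] == 16 (via 0 -> 2 -> 3).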
| 17 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : List[Any] = KandinskyVaaControlnetImgaImgPipeline
_lowercase : List[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
_lowercase : int = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
_lowercase : List[str] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_lowercase : int = False
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return 32
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return 32
@property
def lowerCAmelCase_ ( self : str ):
return self.time_input_dim
@property
def lowerCAmelCase_ ( self : Any ):
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : Tuple ):
return 100
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
__A : List[Any] = {
"""in_channels""": 8,
            # out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__A : Any = UNetaDConditionModel(**__A )
return model
@property
def lowerCAmelCase_ ( self : str ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase_ ( self : Dict ):
torch.manual_seed(0 )
__A : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Any = self.dummy_unet
__A : Any = self.dummy_movq
__A : Optional[Any] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__A : Tuple = DDIMScheduler(**__A )
__A : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase_ ( self : Optional[Any] , __A : str , __A : str=0 ):
__A : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__A ) ).to(__A )
__A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__A )
# create init_image
__A : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
__A : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A : List[str] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
__A : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("""mps""" ):
__A : str = torch.manual_seed(__A )
else:
__A : List[Any] = torch.Generator(device=__A ).manual_seed(__A )
__A : Optional[int] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase_ ( self : Any ):
__A : Any = """cpu"""
__A : str = self.get_dummy_components()
__A : Any = self.pipeline_class(**__A )
__A : Union[str, Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__A : Optional[int] = pipe(**self.get_dummy_inputs(__A ) )
__A : List[Any] = output.images
__A : int = pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
__A : Optional[int] = image[0, -3:, -3:, -1]
__A : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A : str = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
__A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
__A : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__A : List[str] = init_image.resize((512, 512) )
__A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
__A : Optional[int] = torch.from_numpy(np.array(__A ) ).float() / 2_5_5.0
__A : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__A : List[str] = """A robot, 4k photo"""
__A : int = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__A )
__A : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
__A : str = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
__A : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__A , __A : Tuple = pipe_prior(
__A , image=__A , strength=0.8_5 , generator=__A , negative_prompt="""""" , ).to_tuple()
__A : Optional[Any] = pipeline(
image=__A , image_embeds=__A , negative_image_embeds=__A , hint=__A , generator=__A , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , )
__A : int = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__A , __A )
| 17 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : complex ,a__ : str = "x" ,a__ : float = 10**-10 ,a__ : int = 1 ,) -> complex:
__A : Tuple = symbols(a__ )
__A : List[str] = lambdify(a__ ,a__ )
__A : Any = lambdify(a__ ,diff(a__ ,a__ ) )
__A : Dict = starting_point
while True:
if diff_function(a__ ) != 0:
__A : Optional[int] = prev_guess - multiplicity * func(a__ ) / diff_function(
a__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__A : List[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def __SCREAMING_SNAKE_CASE ( a__ : Iterable[str] ,a__ : int ) -> Generator[tuple[str, ...], None, None]:
__A : Union[str, Any] = iter(a__ )
while True:
__A : Union[str, Any] = tuple(itertools.islice(a__ ,a__ ) )
if not chunk:
return
yield chunk
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> str:
__A : List[Any] = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
__A : Union[str, Any] = """"""
if len(a__ ) < 2:
return dirty
for i in range(len(a__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(a__ ) & 1:
clean += "X"
return clean
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> list[str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
__A : Dict = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
__A : Dict = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(a__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(a__ )
return table
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str:
__A : Optional[int] = generate_table(a__ )
__A : str = prepare_input(a__ )
__A : Tuple = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(a__ ,2 ):
__A , __A : int = divmod(table.index(a__ ) ,5 )
__A , __A : Union[str, Any] = divmod(table.index(a__ ) ,5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str:
__A : Optional[int] = generate_table(a__ )
__A : List[str] = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(a__ ,2 ):
__A , __A : List[Any] = divmod(table.index(a__ ) ,5 )
__A , __A : Tuple = divmod(table.index(a__ ) ,5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
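# --- Illustrative usage (added for clarity; not part of the original row) ---
# With the row's renamed local bindings restored, encode and decode round-trip
# the classic Wikipedia example (key "playfair example"):
#
#   ct = encode("Hide the gold in the tree stump", "playfair example")
#   # ct == "BMODZBXDNABEKUDMUIXMMOUVIF"
#   decode(ct, "playfair example")   # -> "HIDETHEGOLDINTHETREXESTUMP"
#
# Note decode() recovers the *prepared* plaintext (uppercased, J folded into
# I, 'X' padding between doubled letters), not the raw input.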
| 17 |
from math import sqrt
def __SCREAMING_SNAKE_CASE ( a__ : int = 1000000 ) -> int:
__A : int = 0
__A : int = 0
__A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a__ ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _lowercase ):
_lowercase : str = (PNDMScheduler,)
_lowercase : List[Any] = (('''num_inference_steps''', 50),)
def lowerCAmelCase_ ( self : str , **__A : Optional[int] ):
__A : List[str] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**__A )
return config
def lowerCAmelCase_ ( self : List[str] , __A : str=0 , **__A : List[str] ):
__A : Tuple = dict(self.forward_default_kwargs )
__A : str = kwargs.pop("""num_inference_steps""" , __A )
__A : str = self.dummy_sample
__A : Any = 0.1 * sample
__A : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__A : Dict = self.get_scheduler_config(**__A )
__A : Optional[Any] = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals
__A : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
__A : str = scheduler_class.from_pretrained(__A )
new_scheduler.set_timesteps(__A )
# copy over dummy past residuals
__A : Any = dummy_past_residuals[:]
__A : List[str] = scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
__A : int = new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__A : int = scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
__A : List[str] = new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Optional[int] , __A : Dict=0 , **__A : str ):
__A : List[str] = dict(self.forward_default_kwargs )
__A : Dict = kwargs.pop("""num_inference_steps""" , __A )
__A : int = self.dummy_sample
__A : Tuple = 0.1 * sample
__A : Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__A : Any = self.get_scheduler_config()
__A : Union[str, Any] = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals (must be after setting timesteps)
__A : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
__A : List[str] = scheduler_class.from_pretrained(__A )
# copy over dummy past residuals
new_scheduler.set_timesteps(__A )
                # copy over dummy past residuals (must be after setting timesteps)
__A : List[Any] = dummy_past_residuals[:]
__A : List[str] = scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
__A : Any = new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__A : Tuple = scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
__A : Tuple = new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        # the leading timesteps repeat because of the Runge-Kutta (PRK) warm-up steps
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
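

# Hedged illustration (added; not part of the original test file): the PRK/PLMS
# timestep split exercised above can be inspected directly on a real scheduler.
# Assumes `diffusers` is installed; `prk_timesteps`/`plms_timesteps` are the same
# attributes the tests iterate over.
if __name__ == "__main__":
    from diffusers import PNDMScheduler

    demo_scheduler = PNDMScheduler(num_train_timesteps=1000, steps_offset=1)
    demo_scheduler.set_timesteps(10)
    print(demo_scheduler.prk_timesteps)   # Runge-Kutta warm-up steps (note the repeats)
    print(demo_scheduler.plms_timesteps)  # linear multistep steps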
| 17 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
    import torch


logger = logging.get_logger(__name__)

# NOTE: the model-specific class name was anonymized in the source dump; a neutral
# placeholder name is used here.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level function imported from image_transforms
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
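

# Hedged usage sketch (added; not part of the original file): run the processor on
# a random image. The (3, 224, 224) output shape follows from the defaults above
# (resize shortest edge to 256, then center-crop to 224x224).
if __name__ == "__main__":
    demo_image = (np.random.rand(64, 96, 3) * 255).astype(np.uint8)
    demo_processor = ImageProcessor()
    demo_batch = demo_processor(images=demo_image)
    print(demo_batch["pixel_values"][0].shape)  # -> (3, 224, 224)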
| 17 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_bert_fast'''] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_bert'''] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_bert'''] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_bert_tf'''] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_bert'''] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
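# Hedged note (added): with the sys.modules swap above, submodule code only runs on
# first attribute access, e.g.
#   from transformers.models.bert import BertConfig  # imports configuration_bert lazily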
| 17 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
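

# Hedged demo (added; not in the original file): pick items greedily by value
# under a total-weight budget of 60.
def demo_greedy():
    menu = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 60], [40, 60, 40])
    chosen, total_value = greedy(menu, 60, Things.get_value)
    print(chosen, total_value)  # -> [Things(Pizza, 100, 60)] 100.0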
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
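# Hedged note (added): each try/except block above degrades gracefully -- when an
# optional backend is missing, the matching dummy_* module exports placeholder
# classes that raise a helpful dependency error only when instantiated.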
| 17 |
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
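

# Hedged spot checks (added; not in the original file), derived from the table above:
assert energy_conversion("joule", "kilojoule", 1000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0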
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
from math import factorial


def solution(num: int = 100) -> int:
    return sum(int(x) for x in str(factorial(num)))
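

# Hedged example (added; not in the original file): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27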
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 17 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_wav2vec2'''] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_wav2vec2'''] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_wav2vec2'''] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .processing_wav2vec2 import Wav2Vec2Processor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForCTC,
Wav2Vec2ForMaskedLM,
Wav2Vec2ForPreTraining,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
Wav2Vec2Model,
Wav2Vec2PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wav2vec2 import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWav2Vec2ForCTC,
TFWav2Vec2ForSequenceClassification,
TFWav2Vec2Model,
TFWav2Vec2PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2Model,
FlaxWav2Vec2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
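# Hedged note (added): because the frameworks above are optional, accessing e.g.
# Wav2Vec2Model without torch installed fails with a clear dependency error at
# attribute-access time rather than at package import.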
| 17 | 1 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
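

# Hedged check (added; not in the original file): both evaluators agree; for
# 1 + 2x + 3x^2 at x = 2 the value is 1 + 4 + 12 = 17.
assert evaluate_poly((1.0, 2.0, 3.0), 2.0) == horner((1.0, 2.0, 3.0), 2.0) == 17.0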
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 17 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
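

# Hedged summary (added; not part of the original file): the integration tests
# above exercise DiffEdit's three stages in order --
#   mask = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
#   lats = pipe.invert(prompt=src, image=img).latents
#   edit = pipe(prompt=tgt, mask_image=mask, image_latents=lats).images[0]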
| 17 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor


class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after shortest-edge resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 17 | 1 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
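

# Hedged note (added): the checks above assume the shared `text_path` pytest
# fixture, a four-line text file, so every dataset materializes as 4 rows of a
# single "text" column.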
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def __SCREAMING_SNAKE_CASE ( a__ : Dict ,a__ : Optional[Any] ,a__ : Tuple ) -> List[Any]:
__A : int = tmp_path / """cache"""
__A : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__A : Any = TextDatasetReader(a__ ,cache_dir=a__ ,keep_in_memory=a__ ).read()
_check_text_dataset(a__ ,a__ )
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] ,)
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, Any] ,a__ : List[str] ,a__ : List[str] ) -> List[str]:
__A : Optional[int] = tmp_path / """cache"""
__A : List[Any] = {"""text""": """string"""}
__A : List[str] = features.copy() if features else default_expected_features
__A : Union[str, Any] = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__A : str = TextDatasetReader(a__ ,features=a__ ,cache_dir=a__ ).read()
_check_text_dataset(a__ ,a__ )
@pytest.mark.parametrize("""split""" ,[None, NamedSplit("""train""" ), """train""", """test"""] )
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : List[str] ,a__ : str ) -> Dict:
__A : Optional[int] = tmp_path / """cache"""
__A : List[Any] = {"""text""": """string"""}
__A : List[Any] = TextDatasetReader(a__ ,cache_dir=a__ ,split=a__ ).read()
_check_text_dataset(a__ ,a__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" ,[str, list] )
def __SCREAMING_SNAKE_CASE ( a__ : Dict ,a__ : List[Any] ,a__ : Optional[int] ) -> Optional[int]:
if issubclass(a__ ,a__ ):
__A : Optional[Any] = text_path
elif issubclass(a__ ,a__ ):
__A : str = [text_path]
__A : List[str] = tmp_path / """cache"""
__A : Tuple = {"""text""": """string"""}
__A : List[Any] = TextDatasetReader(a__ ,cache_dir=a__ ).read()
_check_text_dataset(a__ ,a__ )
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, Any] ,a__ : List[Any] ,a__ : Optional[Any]=("train",) ) -> Tuple:
assert isinstance(a__ ,a__ )
for split in splits:
__A : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : str ) -> Any:
__A : Dict = tmp_path / """cache"""
__A : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__A : Dict = TextDatasetReader({"""train""": text_path} ,cache_dir=a__ ,keep_in_memory=a__ ).read()
_check_text_datasetdict(a__ ,a__ )
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] ,)
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : int ,a__ : Dict ) -> Tuple:
__A : Any = tmp_path / """cache"""
# the reader exposes a single "text" column whose default dtype is "string"; the parametrized features below override it
__A : Optional[Any] = {"""text""": """string"""}
__A : int = features.copy() if features else default_expected_features
__A : Optional[Any] = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__A : Tuple = TextDatasetReader({"""train""": text_path} ,features=a__ ,cache_dir=a__ ).read()
_check_text_datasetdict(a__ ,a__ )
@pytest.mark.parametrize("""split""" ,[None, NamedSplit("""train""" ), """train""", """test"""] )
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : List[Any] ,a__ : Tuple ) -> Any:
if split:
__A : int = {split: text_path}
else:
__A : Union[str, Any] = """train"""
__A : int = {"""train""": text_path, """test""": text_path}
__A : Any = tmp_path / """cache"""
__A : str = {"""text""": """string"""}
__A : List[str] = TextDatasetReader(a__ ,cache_dir=a__ ).read()
_check_text_datasetdict(a__ ,a__ ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
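# A minimal end-to-end sketch of the reader under test, runnable outside pytest;
# the file name and cache location below are illustrative, not part of the tests:
def _demo_text_reader(tmp_dir: str) -> None:
    import os

    sample = os.path.join(tmp_dir, "sample.txt")
    with open(sample, "w") as f:
        f.write("first line\nsecond line\n")
    ds = TextDatasetReader(sample, cache_dir=os.path.join(tmp_dir, "cache")).read()
    # a str path yields a single-split Dataset with one "text" column
    assert ds.num_rows == 2 and ds.column_names == ["text"]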
| 17 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
__A : Union[str, Any] = parse_args()
# Import training_script as a module.
__A : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : int = importlib.import_module(a__ )
# Patch sys.argv
__A : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
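# Example invocation (the launched script name and its flags are hypothetical):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train
#
# xmp.spawn forks one process per TPU core and calls the target module's _mp_fn(index)
# in each, so the launched training script must expose an _mp_fn entry point.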
| 17 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = '''▁'''
UpperCAmelCase_ : List[str] = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCAmelCase_ : Dict = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
UpperCAmelCase_ : Optional[Any] = {
'''facebook/m2m100_418M''': 1_024,
}
# fmt: off
UpperCAmelCase_ : List[Any] = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Optional[Any] = VOCAB_FILES_NAMES
_lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : str = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Any = ['''input_ids''', '''attention_mask''']
_lowercase : List[int] = []
_lowercase : List[int] = []
def __init__( self : Dict , __A : Optional[Any] , __A : Any , __A : Tuple=None , __A : List[str]=None , __A : int="<s>" , __A : Union[str, Any]="</s>" , __A : Optional[Any]="</s>" , __A : Tuple="<pad>" , __A : List[str]="<unk>" , __A : Optional[Any]="m2m100" , __A : Optional[Dict[str, Any]] = None , __A : Any=8 , **__A : List[str] , ):
__A : int = {} if sp_model_kwargs is None else sp_model_kwargs
__A : List[Any] = language_codes
__A : List[str] = FAIRSEQ_LANGUAGE_CODES[language_codes]
__A : str = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code}
__A : Optional[int] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__A )
for lang_code in fairseq_language_code
if self.get_lang_token(__A ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__A , tgt_lang=__A , bos_token=__A , eos_token=__A , sep_token=__A , unk_token=__A , pad_token=__A , language_codes=__A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__A , **__A , )
__A : Any = vocab_file
__A : Any = load_json(__A )
__A : Union[str, Any] = {v: k for k, v in self.encoder.items()}
__A : Optional[int] = spm_file
__A : Dict = load_spm(__A , self.sp_model_kwargs )
__A : int = len(self.encoder )
__A : Optional[Any] = {
self.get_lang_token(__A ): self.encoder_size + i for i, lang_code in enumerate(__A )
}
__A : Any = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__A )}
__A : Union[str, Any] = {v: k for k, v in self.lang_token_to_id.items()}
__A : Tuple = src_lang if src_lang is not None else """en"""
__A : Dict = tgt_lang
__A : List[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__A : str = num_madeup_words
@property
def lowerCAmelCase_ ( self : Any ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowerCAmelCase_ ( self : int ):
return self._src_lang
@src_lang.setter
def lowerCAmelCase_ ( self : int , __A : str ):
__A : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase_ ( self : str , __A : str ):
return self.sp_model.encode(__A , out_type=__A )
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__A , self.encoder[self.unk_token] )
def lowerCAmelCase_ ( self : int , __A : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__A , self.unk_token )
def lowerCAmelCase_ ( self : Tuple , __A : Optional[Any] ):
__A : Dict = []
__A : Tuple = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__A ) + token
__A : Dict = []
else:
current_sub_tokens.append(__A )
out_string += self.sp_model.decode(__A )
return out_string.strip()
def lowerCAmelCase_ ( self : List[str] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
__A : Optional[Any] = [1] * len(self.prefix_tokens )
__A : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__A )) + suffix_ones
return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones
def lowerCAmelCase_ ( self : Optional[int] , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
__A : List[str] = self.__dict__.copy()
__A : Union[str, Any] = None
return state
def __setstate__( self : Dict , __A : Dict ):
__A : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__A : Union[str, Any] = {}
__A : str = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase_ ( self : str , __A : str , __A : Optional[str] = None ):
save_dir = Path(__A )
if not save_dir.is_dir():
raise OSError(F"""{save_dir} should be a directory""" )
__A : str = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__A : Dict = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , __A )
if os.path.abspath(self.spm_file ) != os.path.abspath(__A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __A )
elif not os.path.isfile(self.spm_file ):
with open(__A , """wb""" ) as fi:
__A : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (str(__A ), str(__A ))
def lowerCAmelCase_ ( self : Union[str, Any] , __A : List[str] , __A : str = "en" , __A : Optional[List[str]] = None , __A : str = "ro" , **__A : Optional[int] , ):
__A : Any = src_lang
__A : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__A , __A , **__A )
def lowerCAmelCase_ ( self : int , __A : Dict , __A : Optional[str] , __A : Optional[str] , **__A : List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__A : List[Any] = src_lang
__A : str = self(__A , add_special_tokens=__A , **__A )
__A : Optional[int] = self.get_lang_id(__A )
__A : Optional[Any] = tgt_lang_id
return inputs
def lowerCAmelCase_ ( self : List[str] ):
self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase_ ( self : Optional[Any] ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase_ ( self : Dict , __A : str ):
__A : Any = self.get_lang_token(__A )
__A : Any = self.lang_token_to_id[lang_token]
__A : Any = [self.cur_lang_id]
__A : Optional[Any] = [self.eos_token_id]
def lowerCAmelCase_ ( self : int , __A : str ):
__A : Tuple = self.get_lang_token(__A )
__A : Dict = self.lang_token_to_id[lang_token]
__A : Union[str, Any] = [self.cur_lang_id]
__A : str = [self.eos_token_id]
def lowerCAmelCase_ ( self : Tuple , __A : str ):
return self.lang_code_to_token[lang]
def lowerCAmelCase_ ( self : str , __A : str ):
__A : List[Any] = self.get_lang_token(__A )
return self.lang_token_to_id[lang_token]
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
__A : Optional[int] = sentencepiece.SentencePieceProcessor(**a__ )
spm.Load(str(a__ ) )
return spm
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> Union[Dict, List]:
with open(a__ ,"""r""" ) as f:
return json.load(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : str ) -> None:
with open(a__ ,"""w""" ) as f:
json.dump(a__ ,a__ ,indent=2 )
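# A minimal preprocessing sketch against the public checkpoint listed in the maps
# above (model forward pass omitted):
#
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tok("Life is like a box of chocolates.", return_tensors="pt")
#   # input_ids begin with the __en__ language token and end with </s>,
#   # matching set_src_lang_special_tokens above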
| 17 |
from collections.abc import Sequence
def evaluate_poly( poly : Sequence[float] ,x : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly : Sequence[float] ,x : float ) -> float:
result = 0.0
for coeff in reversed(poly ):
result = result * x + coeff
return result
if __name__ == "__main__":
poly = (0.0, 0.0, 5.0, 9.3, 7.0)
x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
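# Horner's rule does one multiply-add per coefficient, while evaluate_poly recomputes
# a power of x for every term; both orderings must agree numerically:
assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9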
| 17 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowerCamelCase_ ( _lowercase ):
def lowerCAmelCase_ ( self : Dict ):
__A : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__A , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(__A , """num_encoder_blocks""" ) )
class lowerCamelCase_ :
def __init__( self : List[str] , __A : str , __A : str=13 , __A : str=64 , __A : Any=3 , __A : List[str]=4 , __A : Union[str, Any]=[2, 2, 2, 2] , __A : Any=[8, 4, 2, 1] , __A : int=[16, 32, 64, 128] , __A : List[str]=[1, 4, 8, 16] , __A : Optional[int]=[1, 2, 4, 8] , __A : Optional[int]=True , __A : List[Any]=True , __A : List[Any]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0_2 , __A : Dict=3 , __A : Any=None , ):
__A : Optional[Any] = parent
__A : int = batch_size
__A : Optional[int] = image_size
__A : Tuple = num_channels
__A : List[Any] = num_encoder_blocks
__A : List[Any] = sr_ratios
__A : Optional[int] = depths
__A : str = hidden_sizes
__A : str = downsampling_rates
__A : Any = num_attention_heads
__A : Optional[int] = is_training
__A : Union[str, Any] = use_labels
__A : Any = hidden_act
__A : Union[str, Any] = hidden_dropout_prob
__A : Tuple = attention_probs_dropout_prob
__A : Any = initializer_range
__A : Dict = num_labels
__A : List[Any] = scope
def lowerCAmelCase_ ( self : int ):
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Optional[Any] = None
if self.use_labels:
__A : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__A : Any = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Union[str, Any] ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Dict , __A : Dict , __A : List[Any] , __A : str ):
__A : List[Any] = SegformerModel(config=__A )
model.to(__A )
model.eval()
__A : Dict = model(__A )
__A : Union[str, Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowerCAmelCase_ ( self : List[str] , __A : int , __A : Tuple , __A : List[str] ):
__A : int = self.num_labels
__A : int = SegformerForSemanticSegmentation(__A )
model.to(__A )
model.eval()
__A : int = model(__A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__A : Union[str, Any] = model(__A , labels=__A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCAmelCase_ ( self : Union[str, Any] , __A : List[Any] , __A : List[str] , __A : Optional[Any] ):
__A : List[str] = 1
__A : Union[str, Any] = SegformerForSemanticSegmentation(config=__A )
model.to(__A )
model.eval()
__A : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__A )
__A : int = model(__A , labels=__A )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCAmelCase_ ( self : List[Any] ):
__A : Optional[int] = self.prepare_config_and_inputs()
__A , __A , __A : Tuple = config_and_inputs
__A : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
_lowercase : List[Any] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_lowercase : List[Any] = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase : int = True
_lowercase : Optional[int] = False
_lowercase : Union[str, Any] = False
_lowercase : int = False
def lowerCAmelCase_ ( self : Tuple ):
__A : str = SegformerModelTester(self )
__A : Optional[int] = SegformerConfigTester(self , config_class=__A )
def lowerCAmelCase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__A )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__A )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
__A , __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(__A )
__A : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Tuple = [*signature.parameters.keys()]
__A : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def lowerCAmelCase_ ( self : int ):
__A , __A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = True
for model_class in self.all_model_classes:
__A : Optional[Any] = True
__A : Tuple = False
__A : Union[str, Any] = True
__A : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(__A , __A ) )
__A : Optional[int] = outputs.attentions
__A : List[str] = sum(self.model_tester.depths )
self.assertEqual(len(__A ) , __A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : Optional[Any] = True
__A : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__A : Optional[Any] = model(**self._prepare_for_class(__A , __A ) )
__A : str = outputs.attentions
self.assertEqual(len(__A ) , __A )
# verify the first attentions (first block, first layer)
__A : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
__A : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__A : Tuple = (self.model_tester.image_size // 32) ** 2
__A : Optional[int] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
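# With the tester defaults (image_size=64, sr_ratios=[8, 4, 2, 1]) this checks
# (64/4)^2 = 256 queries against (64/(4*8))^2 = 4 reduced keys in the first block,
# and (64/32)^2 = 4 queries against (64/(32*1))^2 = 4 keys in the last block.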
__A : Union[str, Any] = len(__A )
# Check attention is always last and order is fine
__A : Union[str, Any] = True
__A : List[Any] = True
__A : Any = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__A : str = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
__A : List[Any] = outputs.attentions
self.assertEqual(len(__A ) , __A )
# verify the first attentions (first block, first layer)
__A : Tuple = (self.model_tester.image_size // 4) ** 2
__A : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCAmelCase_ ( self : Tuple ):
def check_hidden_states_output(__A : Optional[int] , __A : int , __A : Dict ):
__A : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(__A , __A ) )
__A : Optional[Any] = outputs.hidden_states
__A : Dict = self.model_tester.num_encoder_blocks
self.assertEqual(len(__A ) , __A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__A , __A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[int] = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Any = True
check_hidden_states_output(__A , __A , __A )
def lowerCAmelCase_ ( self : str ):
if not self.model_tester.is_training:
return
__A , __A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ):
continue
__A : List[str] = model_class(__A )
model.to(__A )
model.train()
__A : Any = self._prepare_for_class(__A , __A , return_labels=__A )
__A : Optional[int] = model(**__A ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Dict = SegformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : str ):
# only resize + normalize
__A : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
__A : Optional[int] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__A )
__A : int = prepare_img()
__A : List[Any] = image_processor(images=__A , return_tensors="""pt""" )
__A : Any = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
__A : List[str] = model(__A )
__A : Tuple = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __A )
__A : Any = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __A , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
# only resize + normalize
__A : str = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
__A : Tuple = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__A )
__A : Optional[int] = prepare_img()
__A : int = image_processor(images=__A , return_tensors="""pt""" )
__A : List[Any] = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
__A : Dict = model(__A )
__A : int = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __A )
__A : Optional[int] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __A , atol=1e-1 ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# only resize + normalize
__A : Tuple = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
__A : List[Any] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__A )
__A : Any = prepare_img()
__A : str = image_processor(images=__A , return_tensors="""pt""" )
__A : int = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
__A : int = model(__A )
__A : Optional[int] = outputs.logits.detach().cpu()
__A : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(500, 300)] )
__A : Tuple = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __A )
__A : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__A )
__A : Dict = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , __A )
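# A minimal standalone inference sketch mirroring the integration tests above,
# using the same public checkpoint (image loading omitted):
#
#   from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
#   processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]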
| 17 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
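# The padding-mask trick in _decode_audio, in miniature: the mask is padded with the
# *non*-padding value so samples generated past the original mask length survive the
# boolean slice, while genuinely padded positions are dropped. A toy sketch, assuming
# a padding value of 0:
def _padding_mask_demo() -> None:
    values = np.array([1.0, 2.0, 3.0, 4.0])  # decoded audio, seq_len 4
    mask = np.array([1, 1, 0])  # shorter than values; position 2 was real padding
    padding_value = 0
    mask = np.pad(mask, (0, values.shape[-1] - mask.shape[-1]), "constant", constant_values=1 - padding_value)
    assert values[mask != padding_value].tolist() == [1.0, 2.0, 4.0]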
| 17 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCAmelCase_ : Optional[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCAmelCase_ : Any = '''sshleifer/student_marian_en_ro_6_1'''
UpperCAmelCase_ : Union[str, Any] = '''sshleifer/tiny-mbart'''
@require_torch
class lowerCamelCase_ ( _lowercase ):
def lowerCAmelCase_ ( self : Optional[int] , __A : Union[str, Any]=False , __A : str=None , __A : List[Any]=True , __A : Union[str, Any]=True , __A : Union[str, Any]=True , __A : Union[str, Any]=True , ):
__A : List[Any] = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__A , num_train_epochs=1 , distributed=__A , extra_args_str=__A , predict_with_generate=__A , do_train=__A , do_eval=__A , do_predict=__A , )
__A : int = TrainerState.load_from_json(os.path.join(__A , """trainer_state.json""" ) ).log_history
if not do_eval:
return
__A : Tuple = [log for log in logs if """eval_loss""" in log.keys()]
__A : Union[str, Any] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__A : Tuple = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __A )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase_ ( self : Optional[int] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase_ ( self : int ):
self.run_seqaseq_quick(distributed=__A )
@require_torch_multi_gpu
def lowerCAmelCase_ ( self : Tuple ):
self.run_seqaseq_quick(distributed=__A )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase_ ( self : Tuple ):
self.run_seqaseq_quick(distributed=__A , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase_ ( self : Dict ):
self.run_seqaseq_quick(distributed=__A , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase_ ( self : Tuple ):
self.run_seqaseq_quick(distributed=__A , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__A )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase_ ( self : Optional[Any] ):
self.run_seqaseq_quick(
distributed=__A , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__A )
@require_apex
@require_torch_gpu
def lowerCAmelCase_ ( self : Dict ):
# XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
# program, and it breaks other tests that run from the same pytest worker. Therefore, until
# this is sorted out, it must be run only in an external program, i.e. with distributed=True
# in this test and only under one or more gpus - for cpu we would need a special test.
#
# Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time
# via a 2nd main() call, it botches the future eval.
#
self.run_seqaseq_quick(distributed=__A , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__A , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase_ ( self : str , __A : List[str] ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__A : Optional[Any] = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
__A : int = experiments[experiment_id]
__A : Union[str, Any] = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
__A : Optional[int] = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__A , extra_args_str=data["""extra_args_str"""] )
__A : List[Any] = len(re.findall(__A , cl.err ) )
self.assertEqual(__A , data["""n_matches"""] )
@slow
def lowerCAmelCase_ ( self : str ):
__A : List[str] = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=__A , learning_rate=3e-4 , num_train_epochs=10 , distributed=__A , )
# Check metrics
__A : str = TrainerState.load_from_json(os.path.join(__A , """trainer_state.json""" ) ).log_history
__A : List[str] = [log for log in logs if """eval_loss""" in log.keys()]
__A : int = eval_metrics[0]
__A : List[Any] = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __A )
# test if do_predict saves generations and metrics
__A : Any = os.listdir(__A )
__A : Tuple = {os.path.basename(__A ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase_ ( self : int ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__A : str ) -> Tuple[int, float]:
__A : List[Any] = """--skip_memory_metrics 0"""
__A : Any = self.run_trainer(
max_len=128 , model_name=__A , learning_rate=3e-4 , num_train_epochs=1 , optim=__A , distributed=__A , extra_args_str=__A , do_eval=__A , do_predict=__A , n_gpus_to_use=1 , )
# Check metrics
__A : List[Any] = TrainerState.load_from_json(Path(__A , """trainer_state.json""" ) ).log_history
__A : List[Any] = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
__A : Any = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
__A : Union[str, Any] = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__A , __A , __A : str = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__A , __A , __A : Any = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__A : Optional[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig
__A : List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__A : Union[str, Any] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`
# weights that don't get quantized and remain in fp32. Therefore we only have 25M parameters
# quantized in 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
__A : List[Any] = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__A , __A , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__A , __A , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__A , __A , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def lowerCAmelCase_ ( self : List[Any] , __A : int , __A : str , __A : int , __A : float = 3e-3 , __A : str = "adafactor" , __A : bool = False , __A : str = None , __A : int = 0 , __A : bool = True , __A : bool = True , __A : bool = True , __A : bool = True , __A : int = None , ):
__A : List[Any] = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
__A : Optional[int] = self.get_auto_remove_tmp_dir()
__A : str = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__A )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__A )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
__A : Optional[int] = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__A )}
""".split()
__A : str = """
--do_predict
""".split()
__A : Union[str, Any] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__A : Union[str, Any] = get_gpu_count()
__A : Tuple = get_torch_dist_unique_port()
__A : int = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
__A : Union[str, Any] = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__A , env=self.get_env() )
else:
__A : Any = ["""run_translation.py"""] + args
with patch.object(__A , """argv""" , __A ):
main()
return output_dir
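# The distributed branch above ultimately shells out to something equivalent to
# (GPU count and port vary per run):
#
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
#       examples/pytorch/translation/run_translation.py --model_name_or_path <model> --do_train ...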
| 17 |
def __SCREAMING_SNAKE_CASE ( number : int ) -> int:
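# Doctests added so the doctest.testmod() call below has something to exercise
# (expected values verified by hand).
"""
Return the 1-indexed position of the most significant set bit (0 for input 0).

>>> __SCREAMING_SNAKE_CASE(1)
1
>>> __SCREAMING_SNAKE_CASE(8)
4
>>> __SCREAMING_SNAKE_CASE(0)
0
"""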
if not isinstance(number ,int ):
raise TypeError("""Input value must be an 'int' type""" )
position = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Tuple , __A : Optional[Any] , __A : bool = True , __A : Dict[str, int] = None , __A : int = 32 , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : bool = True , __A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __A : bool = True , __A : Tuple=7 , __A : Tuple=30 , __A : List[str]=400 , __A : str=3 , ):
__A : str = parent
__A : List[Any] = do_resize
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 288}
__A : Optional[Any] = size_divisor
__A : str = do_rescale
__A : Tuple = rescale_factor
__A : Optional[int] = do_normalize
__A : Tuple = do_center_crop
__A : Any = image_mean
__A : Union[str, Any] = image_std
__A : str = do_pad
__A : Any = batch_size
__A : Union[str, Any] = num_channels
__A : Dict = min_resolution
__A : Tuple = max_resolution
def lowerCAmelCase_ ( self : Optional[int] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCAmelCase_ ( self : str , __A : List[Any] , __A : Any=False ):
if not batched:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : Optional[int] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Any = image.size
else:
__A , __A : Dict = image.shape[1], image.shape[2]
__A : Tuple = size / min(__A , __A )
if h < w:
__A , __A : Union[str, Any] = size, scale * w
else:
__A , __A : str = scale * h, size
__A : List[str] = int((1333 / 800) * size )
if max(__A , __A ) > max_size:
__A : Optional[int] = max_size / max(__A , __A )
__A : Tuple = newh * scale
__A : Optional[Any] = neww * scale
__A , __A : Tuple = int(newh + 0.5 ), int(neww + 0.5 )
__A , __A : List[Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__A : Dict = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Dict = max(__A , key=lambda __A : item[0] )[0]
__A : Dict = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
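# Tracing the math above for a 600x400 (w x h) PIL input with shortest_edge=288 and
# size_divisor=32: scale = 288/400 = 0.72 gives (newh, neww) = (288, 432); the cap
# int(1333/800 * 288) = 479 is not exceeded, and divisor rounding keeps 288 while
# flooring 432 to 416, so the expected (height, width) is (288, 416).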
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : str = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : List[str] ):
__A : str = BridgeTowerImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Tuple ):
__A : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
self.assertTrue(hasattr(__A , """size_divisor""" ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
# Initialize image processor
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Union[str, Any] = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : int ):
# Initialize image processor
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : str = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Dict = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : List[Any] ):
# Initialize image processor
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : str = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 17 |
cache : dict[tuple[int, int, int], int] = {}
def _calculate( days : int ,absent : int ,late : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
state_late = _calculate(days - 1 ,absent ,late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
state_absent = _calculate(days - 1 ,absent + 1 ,0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
state_ontime = _calculate(days - 1 ,absent ,0 )
prizestrings = state_late + state_absent + state_ontime
cache[key] = prizestrings
return prizestrings
def solution( days : int = 30 ) -> int:
return _calculate(days ,absent=0 ,late=0 )
if __name__ == "__main__":
print(solution())
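# Sanity check from the Project Euler 191 statement: a 4-day period admits
# exactly 43 prize strings.
print(solution(4))  # expected: 43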
| 17 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Optional[int] = BioGptTokenizer
_lowercase : List[Any] = False
def lowerCAmelCase_ ( self : str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__A : Tuple = dict(zip(__A , range(len(__A ) ) ) )
__A : List[str] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__A : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__A ) )
def lowerCAmelCase_ ( self : Optional[Any] , __A : List[Any] ):
__A : Optional[Any] = """lower newer"""
__A : Optional[Any] = """lower newer"""
return input_text, output_text
def lowerCAmelCase_ ( self : Dict ):
__A : int = BioGptTokenizer(self.vocab_file , self.merges_file )
__A : Any = """lower"""
__A : str = ["""low""", """er</w>"""]
__A : Optional[Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
__A : Any = tokens + ["""<unk>"""]
__A : Optional[int] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def lowerCAmelCase_ ( self : Tuple ):
__A : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
__A : str = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
__A : Any = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
__A : Optional[int] = tokenizer.build_inputs_with_special_tokens(__A )
__A : Dict = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
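# BPE walk-through for the toy vocab above: "lower" splits into l o w e r</w>; the
# ranked merges "l o" -> "lo", "lo w" -> "low" and "e r</w>" -> "er</w>" apply, and no
# further merge covers "low er</w>", so tokenization stops at ["low", "er</w>"]
# (ids 14 and 15), with out-of-vocabulary tokens mapping to "<unk>" (id 20).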
| 17 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , sources : Any , sinks : Optional[Any] ):
if isinstance(sources , int ):
sources = [sources]
if isinstance(sinks , int ):
sinks = [sinks]
if len(sources ) == 0 or len(sinks ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
# add a fake vertex if there is more
# than one source or sink
if len(sources ) > 1 or len(sinks ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # concrete algorithms override this hook
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index])
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
entrances = [0]
exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
maximum_flow = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
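# Sanity check (a sketch): the only source-to-sink chain above is
# 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the bottleneck, and hence the
# maximum flow, is 6 (the 9-capacity edge 3 -> 0 leaves the sink and cannot help).
assert maximum_flow == min(7, 6, 8)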
| 17 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ : List[str] = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
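# Usage sketch (as comments, since this file is itself the lazy package init):
# `import transformers` stays cheap because _LazyModule defers the heavy torch/TF
# modules registered above until one of their symbols is first accessed, e.g.
#   from transformers import GroupViTConfig   # loads only the configuration module
#   config = GroupViTConfig()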
| 17 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # merge two minterm strings that differ in at most one position,
    # replacing the differing bit with "_"; return False otherwise
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["""$"""] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the pair merges, so neither term is prime by itself
                    check1[i] = """*"""
                    check1[j] = """*"""
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # first pick implicants that are the only cover for some minterm (essential)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # then greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("""_""")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("""Enter the no. of variables\n"""))
    minterms = [
        float(x)
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("""Prime Implicants are:""")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("""Essential Prime Implicants are:""")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
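# Worked example (a sketch; integer minterms keep the binary strings clean):
# for 3 variables and minterms {0, 1, 2, 5}, the merging pass yields the prime
# implicants 00_, 0_0 and _01 (set ordering may vary, hence the sort).
assert sorted(check(decimal_to_binary(3, [0, 1, 2, 5]))) == ["00_", "0_0", "_01"]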
| 17 | 1 |
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self):
        # force all edge weights to be distinct, which Borůvka's algorithm assumes
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self):
        string = """"""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F"""{head} -> {tail} == {weight}\n"""
        return string.rstrip("""\n""")
    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)
    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]
    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
# Borůvka's algorithm: repeatedly add the cheapest edge leaving each component.
# (Upstream keeps this as a static method of Graph; it is module level here so
# the flattened classes above stay consistent.)
def boruvka_mst(graph):
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1
        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
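# Usage sketch (assuming the classes above): build a small weighted graph,
# make the edge weights distinct, and extract a minimum spanning tree.
g = Graph.build(vertices=[0, 1, 2, 3], edges=[[0, 1, 1], [0, 2, 1], [2, 3, 1]])
g.distinct_weight()
print(boruvka_mst(g))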
| 17 |
from __future__ import annotations
def ceil_index(v, left, right, key):
    # binary search for the leftmost index in v[left..right] with v[index] >= key
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right
def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element of an existing candidate
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
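# Quick check (a sketch using the function above): for [2, 5, 3, 7, 11, 8, 10, 13, 6]
# one longest strictly increasing subsequence is 2, 3, 7, 8, 10, 13, so the length is 6.
assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6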
| 17 | 1 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
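# Known check (Project Euler 191, as stated in the problem): over a 4-day period
# there are exactly 43 valid prize strings.
assert solution(4) == 43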
| 17 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Tuple , __A : int , __A : int , __A : int , __A : str=0.0 , __A : Optional[int] = None , __A : str = "geglu" , __A : Optional[int] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : str = "layer_norm" , __A : bool = False , ):
super().__init__()
__A : Any = only_cross_attention
__A : Dict = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
__A : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__A : str = AdaLayerNorm(__A , __A )
elif self.use_ada_layer_norm_zero:
__A : Dict = AdaLayerNormZero(__A , __A )
else:
__A : Optional[int] = nn.LayerNorm(__A , elementwise_affine=__A )
__A : List[str] = Attention(
query_dim=__A , heads=__A , dim_head=__A , dropout=__A , bias=__A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__A , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__A : Optional[Any] = (
AdaLayerNorm(__A , __A )
if self.use_ada_layer_norm
else nn.LayerNorm(__A , elementwise_affine=__A )
)
__A : Any = Attention(
query_dim=__A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__A , dim_head=__A , dropout=__A , bias=__A , upcast_attention=__A , ) # is self-attn if encoder_hidden_states is none
else:
__A : str = None
__A : int = None
# 3. Feed-forward
__A : Optional[int] = nn.LayerNorm(__A , elementwise_affine=__A )
__A : Dict = FeedForward(__A , dropout=__A , activation_fn=__A , final_dropout=__A )
# let chunk size default to None
__A : List[Any] = None
__A : Union[str, Any] = 0
def lowerCAmelCase_ ( self : List[str] , __A : Optional[int] , __A : int ):
# Sets chunk feed-forward
__A : str = chunk_size
__A : List[str] = dim
def lowerCAmelCase_ ( self : Optional[int] , __A : torch.FloatTensor , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.LongTensor] = None , __A : Dict[str, Any] = None , __A : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
__A : Tuple = self.norma(__A , __A )
elif self.use_ada_layer_norm_zero:
__A , __A , __A , __A , __A : Union[str, Any] = self.norma(
__A , __A , __A , hidden_dtype=hidden_states.dtype )
else:
__A : List[str] = self.norma(__A )
__A : Tuple = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__A : Optional[int] = self.attna(
__A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__A , **__A , )
if self.use_ada_layer_norm_zero:
__A : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
__A : Optional[int] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__A : Union[str, Any] = (
self.norma(__A , __A ) if self.use_ada_layer_norm else self.norma(__A )
)
__A : Tuple = self.attna(
__A , encoder_hidden_states=__A , attention_mask=__A , **__A , )
__A : Union[str, Any] = attn_output + hidden_states
# 3. Feed-forward
__A : Tuple = self.norma(__A )
if self.use_ada_layer_norm_zero:
__A : Tuple = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
__A : str = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__A : List[str] = torch.cat(
[self.ff(__A ) for hid_slice in norm_hidden_states.chunk(__A , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__A : List[Any] = self.ff(__A )
if self.use_ada_layer_norm_zero:
__A : List[Any] = gate_mlp.unsqueeze(1 ) * ff_output
__A : Any = ff_output + hidden_states
return hidden_states
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Union[str, Any] , __A : int , __A : Optional[int] = None , __A : int = 4 , __A : float = 0.0 , __A : str = "geglu" , __A : bool = False , ):
super().__init__()
__A : Any = int(dim * mult )
__A : Optional[int] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__A : Tuple = GELU(__A , __A )
if activation_fn == "gelu-approximate":
__A : int = GELU(__A , __A , approximate="""tanh""" )
elif activation_fn == "geglu":
__A : List[str] = GEGLU(__A , __A )
elif activation_fn == "geglu-approximate":
__A : Any = ApproximateGELU(__A , __A )
__A : Optional[int] = nn.ModuleList([] )
# project in
self.net.append(__A )
# project dropout
self.net.append(nn.Dropout(__A ) )
# project out
self.net.append(nn.Linear(__A , __A ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__A ) )
def lowerCAmelCase_ ( self : Any , __A : Union[str, Any] ):
for module in self.net:
__A : List[Any] = module(__A )
return hidden_states
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Optional[Any] , __A : int , __A : int , __A : str = "none" ):
super().__init__()
__A : Dict = nn.Linear(__A , __A )
__A : List[Any] = approximate
def lowerCAmelCase_ ( self : str , __A : Optional[Any] ):
if gate.device.type != "mps":
return F.gelu(__A , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def lowerCAmelCase_ ( self : List[Any] , __A : Optional[int] ):
__A : Union[str, Any] = self.proj(__A )
__A : Tuple = self.gelu(__A )
return hidden_states
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Union[str, Any] , __A : int , __A : int ):
super().__init__()
__A : Optional[int] = nn.Linear(__A , dim_out * 2 )
def lowerCAmelCase_ ( self : Tuple , __A : Tuple ):
if gate.device.type != "mps":
return F.gelu(__A )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
def lowerCAmelCase_ ( self : int , __A : Dict ):
__A , __A : Dict = self.proj(__A ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__A )
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Optional[Any] , __A : int , __A : int ):
super().__init__()
__A : Tuple = nn.Linear(__A , __A )
def lowerCAmelCase_ ( self : int , __A : Tuple ):
__A : List[str] = self.proj(__A )
        return x * torch.sigmoid(1.702 * x)
class lowerCamelCase_ ( nn.Module ):
def __init__( self : int , __A : str , __A : str ):
super().__init__()
__A : Optional[Any] = nn.Embedding(__A , __A )
__A : Any = nn.SiLU()
__A : Optional[Any] = nn.Linear(__A , embedding_dim * 2 )
__A : Optional[int] = nn.LayerNorm(__A , elementwise_affine=__A )
def lowerCAmelCase_ ( self : str , __A : Any , __A : Tuple ):
__A : List[Any] = self.linear(self.silu(self.emb(__A ) ) )
__A , __A : Union[str, Any] = torch.chunk(__A , 2 )
__A : str = self.norm(__A ) * (1 + scale) + shift
return x
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Tuple , __A : Union[str, Any] , __A : int ):
super().__init__()
__A : Any = CombinedTimestepLabelEmbeddings(__A , __A )
__A : Any = nn.SiLU()
__A : Tuple = nn.Linear(__A , 6 * embedding_dim , bias=__A )
__A : Union[str, Any] = nn.LayerNorm(__A , elementwise_affine=__A , eps=1e-6 )
def lowerCAmelCase_ ( self : Tuple , __A : Any , __A : Union[str, Any] , __A : Dict , __A : Optional[int]=None ):
__A : Tuple = self.linear(self.silu(self.emb(__A , __A , hidden_dtype=__A ) ) )
__A , __A , __A , __A , __A , __A : List[Any] = emb.chunk(6 , dim=1 )
__A : str = self.norm(__A ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Dict , __A : int , __A : int , __A : int , __A : Optional[str] = None , __A : float = 1e-5 ):
super().__init__()
__A : Optional[Any] = num_groups
__A : Tuple = eps
if act_fn is None:
__A : Union[str, Any] = None
else:
__A : Tuple = get_activation(__A )
__A : Optional[Any] = nn.Linear(__A , out_dim * 2 )
def lowerCAmelCase_ ( self : List[Any] , __A : List[Any] , __A : Optional[int] ):
if self.act:
__A : Union[str, Any] = self.act(__A )
__A : List[Any] = self.linear(__A )
__A : Dict = emb[:, :, None, None]
__A , __A : str = emb.chunk(2 , dim=1 )
__A : str = F.group_norm(__A , self.num_groups , eps=self.eps )
__A : Any = x * (1 + scale) + shift
return x
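# Minimal standalone sketch of the GEGLU gating used above (hypothetical sizes):
# a single projection doubles the width, and one half gates the other via GELU.
_proj = nn.Linear(8, 2 * 16)
_hidden, _gate = _proj(torch.randn(1, 8)).chunk(2, dim=-1)
assert (_hidden * F.gelu(_gate)).shape == (1, 16)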
| 17 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
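# The property above (known upstream as inputs_to_logits_ratio) multiplies the
# conv strides to get the total downsampling ratio; with the default strides
# (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320 input samples per output frame
# (20 ms of audio at 16 kHz). A standalone sketch:
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320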
| 17 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : int = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Dict = {
'''allenai/led-base-16384''': 16_384,
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Dict = LEDTokenizer
_lowercase : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , __A : Dict=None , __A : Tuple=None , __A : Optional[int]=None , __A : Any="replace" , __A : Any="<s>" , __A : Any="</s>" , __A : List[str]="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Optional[int]="<pad>" , __A : List[Any]="<mask>" , __A : List[Any]=False , __A : Tuple=True , **__A : Optional[Any] , ):
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
__A : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
__A : int = getattr(__A , pre_tok_state.pop("""type""" ) )
__A : Optional[Any] = add_prefix_space
__A : Optional[int] = pre_tok_class(**__A )
__A : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__A : Optional[int] = """post_processor"""
__A : Tuple = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
__A : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__A : Dict = tuple(state["""sep"""] )
if "cls" in state:
__A : Optional[Any] = tuple(state["""cls"""] )
__A : Optional[Any] = False
if state.get("""add_prefix_space""" , __A ) != add_prefix_space:
__A : Tuple = add_prefix_space
__A : Union[str, Any] = True
if state.get("""trim_offsets""" , __A ) != trim_offsets:
__A : int = trim_offsets
__A : Optional[Any] = True
if changes_to_apply:
__A : Union[str, Any] = getattr(__A , state.pop("""type""" ) )
__A : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase_ ( self : List[str] ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
__A : List[Any] = value
def lowerCAmelCase_ ( self : int , *__A : Optional[int] , **__A : Optional[int] ):
__A : Any = kwargs.get("""is_split_into_words""" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__A , **__A )
def lowerCAmelCase_ ( self : List[str] , *__A : Tuple , **__A : Optional[Any] ):
__A : Tuple = kwargs.get("""is_split_into_words""" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__A , **__A )
def lowerCAmelCase_ ( self : Dict , __A : str , __A : Optional[str] = None ):
__A : List[Any] = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def lowerCAmelCase_ ( self : int , __A : int , __A : Tuple=None ):
__A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self : str , __A : List[int] , __A : Optional[List[int]] = None ):
__A : Dict = [self.sep_token_id]
__A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self : Tuple , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
__A : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
__A : Dict = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__A : Tuple = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__A : Dict = len(encoded_inputs["""global_attention_mask"""] ) != len(__A )
if needs_to_be_padded:
__A : Any = len(__A ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__A : int = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__A : List[Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
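# Sketch of the padding rule above (assuming right padding): the global
# attention mask is padded with -1, because 0 already means "local attention"
# in LED rather than "do not attend".
example_mask = [0, 1, 0]
assert example_mask + [-1] * 2 == [0, 1, 0, -1, -1]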
| 17 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
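# Example invocation (hypothetical file names) through the fire wrapper above:
#   python rouge_cli.py preds.txt refs.txt --save_path rouge.json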
| 17 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCAmelCase_ : Tuple = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    url = """https://pypi.org/pypi/diffusers/json"""
    releases = json.loads(request.urlopen(url).read())["""releases"""].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / """__init__.py"""
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / """__init__.py"""
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, """r""", encoding="""utf-8""") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall("""^\s*import\s+\.(\S+)\s*$""", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall("""^\s*from\s+\.(\S+)\s+import""", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def __SCREAMING_SNAKE_CASE ( a__ : List[Any] ) -> Union[str, Any]:
__A : List[str] = False
__A : Any = [module_file]
__A : Any = []
# Let's recurse through all relative imports
while not no_change:
__A : Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(a__ ) )
__A : Optional[Any] = Path(a__ ).parent
__A : Tuple = [str(module_path / m ) for m in new_imports]
__A : int = [f for f in new_import_files if f not in all_relative_imports]
__A : int = [f"""{f}.py""" for f in new_import_files]
__A : Tuple = len(a__ ) == 0
all_relative_imports.extend(a__ )
return all_relative_imports
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> Optional[Any]:
with open(a__ ,"""r""" ,encoding="""utf-8""" ) as f:
__A : Dict = f.read()
# Imports of the form `import xxx`
__A : Tuple = re.findall("""^\s*import\s+(\S+)\s*$""" ,a__ ,flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("""^\s*from\s+(\S+)\s+import""" ,a__ ,flags=re.MULTILINE )
# Only keep the top-level module
__A : Tuple = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
__A : Any = list(set(a__ ) )
__A : Optional[int] = []
for imp in imports:
try:
importlib.import_module(a__ )
except ImportError:
missing_packages.append(a__ )
if len(a__ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
f"""{", ".join(a__ )}. Run `pip install {" ".join(a__ )}`""" )
return get_relative_imports(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : str ) -> Optional[int]:
__A : Dict = module_path.replace(os.path.sep ,""".""" )
__A : Tuple = importlib.import_module(a__ )
if class_name is None:
return find_pipeline_class(a__ )
return getattr(a__ ,a__ )
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ) -> Union[str, Any]:
from ..pipelines import DiffusionPipeline
__A : Any = dict(inspect.getmembers(a__ ,inspect.isclass ) )
__A : Dict = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls ,a__ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
__A : Optional[Any] = cls
return pipeline_class
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, os.PathLike] ,a__ : str ,a__ : Optional[Union[str, os.PathLike]] = None ,a__ : bool = False ,a__ : bool = False ,a__ : Optional[Dict[str, str]] = None ,a__ : Optional[Union[bool, str]] = None ,a__ : Optional[str] = None ,a__ : bool = False ,) -> Union[str, Any]:
__A : Any = str(a__ )
__A : Optional[Any] = os.path.join(a__ ,a__ )
if os.path.isfile(a__ ):
__A : Any = module_file_or_url
__A : int = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
__A : Tuple = get_diffusers_versions()
# cut ".dev0"
__A : str = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
__A : Optional[Any] = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
__A : Any = f"""v{revision}"""
elif revision == "main":
__A : Dict = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
__A : Union[str, Any] = COMMUNITY_PIPELINES_URL.format(revision=a__ ,pipeline=a__ )
try:
__A : Optional[Any] = cached_download(
a__ ,cache_dir=a__ ,force_download=a__ ,proxies=a__ ,resume_download=a__ ,local_files_only=a__ ,use_auth_token=a__ ,)
__A : Dict = """git"""
__A : str = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
__A : List[str] = hf_hub_download(
a__ ,a__ ,cache_dir=a__ ,force_download=a__ ,proxies=a__ ,resume_download=a__ ,local_files_only=a__ ,use_auth_token=a__ ,)
__A : Tuple = os.path.join("""local""" ,"""--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
__A : Tuple = check_imports(a__ )
# Now we move the module inside our cached dynamic modules.
__A : str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(a__ )
__A : List[Any] = Path(a__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(a__ ,submodule_path / module_file )
for module_needed in modules_needed:
__A : int = f"""{module_needed}.py"""
shutil.copy(os.path.join(a__ ,a__ ) ,submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(a__ ,a__ ):
__A : List[Any] = use_auth_token
elif use_auth_token is True:
__A : Any = HfFolder.get_token()
else:
__A : Dict = None
__A : Union[str, Any] = model_info(a__ ,revision=a__ ,token=a__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__A : Union[str, Any] = submodule_path / commit_hash
__A : Union[str, Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(a__ )
if not (submodule_path / module_file).exists():
shutil.copy(a__ ,submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
a__ ,f"""{module_needed}.py""" ,cache_dir=a__ ,force_download=a__ ,resume_download=a__ ,proxies=a__ ,use_auth_token=a__ ,revision=a__ ,local_files_only=a__ ,)
return os.path.join(a__ ,a__ )
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, os.PathLike] ,a__ : str ,a__ : Optional[str] = None ,a__ : Optional[Union[str, os.PathLike]] = None ,a__ : bool = False ,a__ : bool = False ,a__ : Optional[Dict[str, str]] = None ,a__ : Optional[Union[bool, str]] = None ,a__ : Optional[str] = None ,a__ : bool = False ,**a__ : Optional[Any] ,) -> List[Any]:
__A : Optional[int] = get_cached_module_file(
a__ ,a__ ,cache_dir=a__ ,force_download=a__ ,resume_download=a__ ,proxies=a__ ,use_auth_token=a__ ,revision=a__ ,local_files_only=a__ ,)
return get_class_in_module(a__ ,final_module.replace(""".py""" ,"""""" ) )
| 17 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
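# Example CLI call (hypothetical paths):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert.bin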
| 17 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ), )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
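# Example launch (hypothetical script name), mirroring torch.distributed.launch:
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...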
| 17 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ : Optional[int] = 16
UpperCAmelCase_ : List[Any] = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
# Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
# We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("""glue""", """mrpc""")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
model.train()
for step, batch in enumerate(a__ ):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
        samples_seen = 0
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(a__ ) - 1:
__A : str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__A : Tuple = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=a__ ,references=a__ ,)
__A : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" ,a__ )
        performance_metric[F"""epoch-{epoch}"""] = eval_metric["""accuracy"""]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,"""all_results.json""" ) ,"""w""" ) as f:
json.dump(a__ ,a__ )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
parser.add_argument(
"""--model_name_or_path""" ,type=a__ ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=a__ ,)
parser.add_argument(
"""--output_dir""" ,type=a__ ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,)
parser.add_argument(
"""--performance_lower_bound""" ,type=a__ ,default=a__ ,help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" ,)
parser.add_argument(
"""--num_epochs""" ,type=a__ ,default=3 ,help="""Number of train epochs.""" ,)
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
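# Example launch (hypothetical DeepSpeed config), matching the CLI defined above:
#   accelerate launch --config_file ds_zero2.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --num_epochs 3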
| 17 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
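# Expected results for the edges above (a sketch; show_min only returns, so we
# print): the cheapest 1 -> 4 route is 1 -> 3 -> 4 (5 + 6 = 11) and the cheapest
# 0 -> 3 route is 0 -> 2 -> 3 (9 + 7 = 16).
print(graph.show_min(1, 4), graph.show_min(0, 3))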
| 17 | 1 |
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings that differ in at most one position.

    Returns the merged string with '_' at the differing position, or
    False when the strings differ in more than one position.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge minterms that differ in one bit until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # both terms took part in a merge, so neither is a prime implicant yet
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether a prime implicant (with ``count`` don't-cares) covers a minterm."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 17 |
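A quick sanity check of the pipeline above, assuming the corrected merge step (`k is not False`); the data is mine. With two variables and minterms 0-3 the function is a tautology, so everything collapses to the single implicant "__":

binary = decimal_to_binary(2, [0, 1, 2, 3])  # ['00', '01', '10', '11']
pi = check(binary)                           # ['__']
chart = prime_implicant_chart(pi, binary)    # [[1, 1, 1, 1]]
print(selection(chart, pi))                  # ['__']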
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` (given as a string in ``variable``) via Newton-Raphson."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 1 |
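Stripped of sympy, the update above is just x_next = x - multiplicity * f(x) / f'(x). A minimal numeric sketch for f(x) = x**2 - 2 (names are mine):

def newton_sqrt2(guess: float = 1.0, precision: float = 1e-10) -> float:
    # f(x) = x**2 - 2, f'(x) = 2 * x
    while True:
        nxt = guess - (guess**2 - 2) / (2 * guess)
        if abs(nxt - guess) < precision:
            return nxt
        guess = nxt

print(newton_sqrt2())  # 1.4142135623730951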
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 17 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 1 |
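The increment above counts cuboids a <= b <= c = max_cuboid_size with a + b = sum_shortest_sides whose shortest surface path sqrt((a + b)**2 + c**2) is an integer. A small hand check of one term (the data is mine):

from math import sqrt

m, ab = 3, 4  # c = 3 and a + b = 4 gives path length sqrt(16 + 9) = 5
assert sqrt(ab**2 + m**2).is_integer()
print(min(m, ab // 2) - max(1, ab - m) + 1)  # 2, i.e. the pairs (1, 3) and (2, 2)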
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __SCREAMING_SNAKE_CASE ( a__ : Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]:
__A : int = []
__A : str = []
__A : List[Any] = []
for rt in rc.restypes:
__A : str = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
__A : int = {name: i for i, name in enumerate(a__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
__A : Tuple = torch.tensor(
a__ ,dtype=torch.intaa ,device=protein["""aatype"""].device ,)
__A : Dict = torch.tensor(
a__ ,dtype=torch.intaa ,device=protein["""aatype"""].device ,)
__A : Tuple = torch.tensor(
a__ ,dtype=torch.floataa ,device=protein["""aatype"""].device ,)
__A : Tuple = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
__A : Tuple = restype_atomaa_to_atomaa[protein_aatype]
__A : Tuple = restype_atomaa_mask[protein_aatype]
__A : int = residx_atomaa_mask
__A : List[Any] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
__A : List[str] = restype_atomaa_to_atomaa[protein_aatype]
__A : Optional[Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
__A : int = torch.zeros([21, 37] ,dtype=torch.floataa ,device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
__A : List[Any] = rc.restype_atoa[restype_letter]
__A : Optional[Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
__A : Optional[Any] = rc.atom_order[atom_name]
__A : Optional[Any] = 1
__A : int = restype_atomaa_mask[protein_aatype]
__A : Tuple = residx_atomaa_mask
return protein
def __SCREAMING_SNAKE_CASE ( a__ : Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]:
__A : int = tree_map(lambda a__ : torch.tensor(a__ ,device=batch["""aatype"""].device ) ,a__ ,np.ndarray )
__A : int = tensor_tree_map(lambda a__ : np.array(a__ ) ,make_atomaa_masks(a__ ) )
return out
| 17 |
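The mapping tensors built above are applied by plain integer ("fancy") indexing: indexing a [num_restypes, num_atoms] lookup table with a per-residue aatype tensor broadcasts it to [num_res, num_atoms]. A standalone sketch with made-up shapes:

import torch

table = torch.tensor([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0]])  # [2 residue types, 3 atoms]
aatype = torch.tensor([0, 1, 1, 0])                        # one type id per residue
per_residue = table[aatype]                                # one table row per residue
print(per_residue.shape)                                   # torch.Size([4, 3])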
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 1 |
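The `shortest_edge` resize above preserves aspect ratio: the shorter side becomes the target and the longer side scales by the same factor. A minimal sketch of that size computation (the helper name is mine; the library's `get_resize_output_image_size` handles more cases):

def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple:
    # returns (new_height, new_width)
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    return shortest_edge, int(shortest_edge * width / height)

print(shortest_edge_size(480, 640, 256))  # (256, 341)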
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : Union[str, Any] = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ['''YolosFeatureExtractor''']
UpperCAmelCase_ : Tuple = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
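A usage sketch for the greedy routine above (the menu data is mine); the key function chooses the heuristic, here plain value:

names = ["apple", "bread", "cheese"]
values = [50.0, 30.0, 90.0]
weights = [10.0, 5.0, 25.0]

menu = build_menu(names, values, weights)
taken, total = greedy(menu, 30.0, Things.get_value)
print(taken, total)  # [Things(cheese, 90.0, 25.0), Things(bread, 30.0, 5.0)] 120.0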
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def lowerCAmelCase_ ( self : List[str] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 17 |
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
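Usage examples for the converter above, both derivable directly from the table (1 kWh = 3.6e6 J):

print(energy_conversion("kilowatthour", "joule", 1))          # 3600000.0
print(energy_conversion("joule", "kilowatthour", 3_600_000))  # 1.0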
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
def __init__( self : str , __A : Tuple , __A : Tuple=13 , __A : List[str]=7 , __A : List[str]=True , __A : Any=True , __A : List[str]=True , __A : Optional[int]=True , __A : Union[str, Any]=99 , __A : str=16 , __A : Optional[Any]=36 , __A : Union[str, Any]=6 , __A : Optional[int]=6 , __A : Optional[int]=6 , __A : List[str]=37 , __A : Optional[int]="gelu" , __A : List[str]=0.1 , __A : List[str]=0.1 , __A : Tuple=512 , __A : List[str]=16 , __A : int=2 , __A : Tuple=0.0_2 , __A : Optional[int]=3 , __A : List[Any]=4 , __A : int=None , ):
__A : Optional[int] = parent
__A : str = batch_size
__A : List[Any] = seq_length
__A : Union[str, Any] = is_training
__A : Dict = use_input_mask
__A : str = use_token_type_ids
__A : List[Any] = use_labels
__A : Optional[int] = vocab_size
__A : Dict = embedding_size
__A : Optional[int] = hidden_size
__A : Optional[Any] = num_hidden_layers
__A : Optional[Any] = num_hidden_groups
__A : List[Any] = num_attention_heads
__A : Optional[Any] = intermediate_size
__A : List[Any] = hidden_act
__A : Tuple = hidden_dropout_prob
__A : str = attention_probs_dropout_prob
__A : Dict = max_position_embeddings
__A : Optional[int] = type_vocab_size
__A : Dict = type_sequence_label_size
__A : List[str] = initializer_range
__A : Optional[int] = num_labels
__A : Tuple = num_choices
__A : Union[str, Any] = scope
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Any = None
if self.use_input_mask:
__A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__A : Dict = None
if self.use_token_type_ids:
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Optional[Any] = None
__A : Dict = None
__A : List[str] = None
if self.use_labels:
__A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Dict ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase_ ( self : int , __A : Dict , __A : Dict , __A : str , __A : Optional[int] , __A : Optional[Any] , __A : str , __A : int ):
__A : List[Any] = AlbertModel(config=__A )
model.to(__A )
model.eval()
__A : int = model(__A , attention_mask=__A , token_type_ids=__A )
__A : Dict = model(__A , token_type_ids=__A )
__A : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str , __A : Tuple , __A : Union[str, Any] , __A : Any , __A : Union[str, Any] , __A : Tuple , __A : List[Any] ):
__A : int = AlbertForPreTraining(config=__A )
model.to(__A )
model.eval()
__A : Any = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , sentence_order_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase_ ( self : List[Any] , __A : Dict , __A : int , __A : Optional[int] , __A : Optional[int] , __A : Any , __A : Dict , __A : Optional[int] ):
__A : List[str] = AlbertForMaskedLM(config=__A )
model.to(__A )
model.eval()
__A : Tuple = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : List[str] , __A : Optional[int] , __A : Any , __A : Union[str, Any] , __A : int , __A : Any , __A : Optional[Any] , __A : Union[str, Any] ):
__A : List[str] = AlbertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__A : str = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Optional[int] , __A : Tuple , __A : str , __A : Tuple , __A : List[str] , __A : int , __A : List[Any] , __A : Optional[Any] ):
__A : Dict = self.num_labels
__A : Optional[Any] = AlbertForSequenceClassification(__A )
model.to(__A )
model.eval()
__A : str = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Any , __A : List[Any] , __A : Tuple , __A : Optional[Any] , __A : Optional[int] , __A : str , __A : Tuple , __A : str ):
__A : Any = self.num_labels
__A : Optional[int] = AlbertForTokenClassification(config=__A )
model.to(__A )
model.eval()
__A : Optional[int] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __A : List[Any] , __A : Any , __A : Optional[int] , __A : Optional[int] , __A : Union[str, Any] , __A : Dict , __A : int ):
__A : Dict = self.num_choices
__A : List[str] = AlbertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__A : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Union[str, Any] = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : List[Any] = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : List[str] = config_and_inputs
__A : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
_lowercase : Optional[Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowercase : List[str] = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : str = True
def lowerCAmelCase_ ( self : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Tuple=False ):
__A : List[str] = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
__A : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
__A : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def lowerCAmelCase_ ( self : Dict ):
__A : Optional[Any] = AlbertModelTester(self )
__A : Optional[int] = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowerCAmelCase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[str] ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def lowerCAmelCase_ ( self : int ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def lowerCAmelCase_ ( self : Tuple ):
__A : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : Optional[int] = type
self.model_tester.create_and_check_model(*__A )
@slow
def lowerCAmelCase_ ( self : int ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : int = AlbertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[str] ):
__A : Dict = AlbertModel.from_pretrained("""albert-base-v2""" )
__A : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__A : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__A : Any = model(__A , attention_mask=__A )[0]
__A : str = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
__A : Any = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
| 17 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17 | 1 |
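`_LazyModule` defers the heavy framework imports until an attribute is first touched. A stripped-down sketch of the idea, not the real implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # import the owning submodule only on first access
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)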
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
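Usage of the twin-prime helper above (the values follow directly from the definition):

print(twin_prime(3))  # 5, since 3 and 5 are both prime
print(twin_prime(4))  # -1, since 4 is not prime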
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config: bool = True) -> list:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 17 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Any ):
__A : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
__A : Dict = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
__A : str = """A painting of a squirrel eating a burger"""
__A : int = torch.manual_seed(0 )
__A : Tuple = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
__A : List[str] = output.images
__A : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__A : List[Any] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : Union[str, Any] = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
__A : Tuple = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
__A : Tuple = """A painting of a squirrel eating a burger"""
__A : Any = torch.manual_seed(0 )
__A : str = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
__A : Any = output.images
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__A : int = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def lowerCAmelCase_ ( self : Optional[int] ):
__A : str = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
__A : Tuple = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
__A : List[Any] = """A painting of a squirrel eating a burger"""
__A : Any = torch.manual_seed(0 )
__A : Optional[Any] = sd_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , )
__A : Union[str, Any] = output.images
__A : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__A : List[str] = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 17 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 1 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
| 17 |
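Usage sketch for the decorator above (the function is mine); the wrapped call warns and then runs normally:

@experimental
def beta_feature(x: int) -> int:
    return x * 2

print(beta_feature(3))  # emits a UserWarning, then prints 6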
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 17 | 1 |
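The launcher above works by overwriting `sys.argv` before spawning, so the imported training module parses its flags exactly as if it had been invoked directly. A tiny standalone illustration of the same trick (the helper name is mine):

import sys

def run_with_argv(fn, argv):
    # temporarily swap sys.argv, call fn, then restore it
    old_argv, sys.argv = sys.argv, argv
    try:
        return fn()
    finally:
        sys.argv = old_argv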
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
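# A minimal sketch of exercising the model above with dummy inputs. Shapes
# follow the config defaults; the snippet assumes the relative block imports
# resolve to working implementations and that jax/flax are installed.
import jax
import jax.numpy as jnp

unet = FlaxUNetaDConditionModel(sample_size=32, in_channels=4, out_channels=4)
params = unet.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)  # (batch, channels, height, width)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)  # e.g. 77 text tokens

out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.sample.shape)  # (1, 4, 32, 32)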
| 17 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing order of degree) at x, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule: one multiply and one add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 7x^4 + 9.3x^3 + 5x^2
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
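# Horner's rule is a refactoring of the power-form sum,
# p(x) = c0 + x*(c1 + x*(c2 + ...)), so both functions should agree up to
# floating-point rounding. A quick illustrative cross-check:
import math
import random

coeffs = [random.uniform(-5.0, 5.0) for _ in range(8)]
point = random.uniform(-3.0, 3.0)
assert math.isclose(evaluate_poly(coeffs, point), horner(coeffs, point), rel_tol=1e-9, abs_tol=1e-9)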
| 17 | 1 |