from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the sum of a maximum-sum contiguous subarray, using Kadane's algorithm.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray with `num` or start over at `num`
        # (or drop it entirely when empty subarrays are allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = (KDPMaDiscreteScheduler,)
UpperCAmelCase : Any = 10
def lowerCAmelCase_ ( self : Dict , **_UpperCAmelCase : Optional[Any] ):
_A = {
'num_train_timesteps': 1_100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_UpperCAmelCase )
return config
def lowerCAmelCase_ ( self : Any ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(prediction_type='v_prediction' )
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2
assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def lowerCAmelCase_ ( self : Optional[Any] ):
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def lowerCAmelCase_ ( self : Any ):
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase )
_A = self.dummy_model()
_A = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if str(_UpperCAmelCase ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
import requests

giphy_api_key = "YOUR API KEY"
# Can be fetched from https://developers.giphy.com/dashboard/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs from Giphy for a given search query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[Any]=10 ) -> Optional[int]:
'''simple docstring'''
_A = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Union[str, Any]=10 ) -> List[str]:
'''simple docstring'''
_A = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(_snake_case , 'schedule.bin' )
torch.save(scheduler.state_dict() , _snake_case )
_A = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ):
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase )
_A = torch.tensor([0.4, 0.2, -0.5] )
_A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_A = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
_A = criterion(_UpperCAmelCase , _UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCAmelCase_ ( self : int ):
_A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase )
_A = torch.tensor([0.4, 0.2, -0.5] )
_A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_A = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_UpperCAmelCase , weight_decay=0.0 , relative_step=_UpperCAmelCase , scale_parameter=_UpperCAmelCase , warmup_init=_UpperCAmelCase , )
for _ in range(1_000 ):
_A = criterion(_UpperCAmelCase , _UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = nn.Linear(50 , 50 ) if is_torch_available() else None
UpperCAmelCase : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
UpperCAmelCase : Dict = 10
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=None ):
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase , msg=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_A = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_A = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_A , _A = data
_A = scheduler_func(self.optimizer , **_UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_A = unwrap_schedule(_UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
_UpperCAmelCase , _UpperCAmelCase , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
_A = scheduler_func(self.optimizer , **_UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_UpperCAmelCase ) # wrap to test picklability of the schedule
_A = unwrap_and_save_reload_schedule(_UpperCAmelCase , self.num_steps )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase , msg=F'''failed for {scheduler_func} in save and reload''' )
class lowercase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ):
_A = fn
def __call__( self : Tuple , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[str] ):
return self.fn(*_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Any ):
_A = list(map(self , scheduler.lr_lambdas ) )
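
As a standalone illustration of the schedule shapes pinned down by the expected-value table above, here is a minimal sketch (assumes torch and transformers are installed; AdamW is the transformers implementation, matching the file's own imports):

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])
    scheduler.step()
print(lrs)  # ramps 0 -> 10 over two warmup steps, then decays linearly toward 0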
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
"""simple docstring"""
import math
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if (
not isinstance(_snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if (
not isinstance(_snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
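
A worked example tying the two functions together (values are approximate due to floating point): a 100 VA load at power factor 0.8 delivers 80 W of real power and about 60 var of reactive power, since sqrt(1 - 0.8**2) = 0.6.

print(real_power(100, 0.8))      # 80.0
print(reactive_power(100, 0.8))  # ~60.0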
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__magic_name__ : Any = logging.get_logger(__name__)
__magic_name__ : List[str] = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class lowercase__ :
"""simple docstring"""
def __init__( self , _A=None , **_A ):
'''simple docstring'''
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
UpperCamelCase : Union[str, Any] = model
UpperCamelCase : List[str] = kwargs.get("""model_save_dir""" , _UpperCAmelCase )
UpperCamelCase : List[str] = kwargs.get("""latest_model_name""" , _UpperCAmelCase )
def __call__( self , **_A ):
'''simple docstring'''
UpperCamelCase : Any = {k: np.array(_UpperCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def _a ( _A , _A=None , _A=None ):
'''simple docstring'''
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
UpperCamelCase : str = """CPUExecutionProvider"""
return ort.InferenceSession(_UpperCAmelCase , providers=[provider] , sess_options=_UpperCAmelCase )
def _a ( self , _A , _A = None , **_A ):
'''simple docstring'''
UpperCamelCase : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCamelCase : int = self.model_save_dir.joinpath(self.latest_model_name )
UpperCamelCase : Tuple = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCamelCase : List[str] = self.model_save_dir.joinpath(_UpperCAmelCase )
if src_path.exists():
UpperCamelCase : str = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
def _a ( self , _A , **_A , ):
'''simple docstring'''
if os.path.isfile(_UpperCAmelCase ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# saving model weights/files
self._save_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def _a ( cls , _A , _A = None , _A = None , _A = False , _A = None , _A = None , _A = None , _A = None , **_A , ):
'''simple docstring'''
UpperCamelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCAmelCase ):
UpperCamelCase : Any = OnnxRuntimeModel.load_model(
os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
UpperCamelCase : Optional[int] = Path(_UpperCAmelCase )
# load model from hub
else:
# download model
UpperCamelCase : Tuple = hf_hub_download(
repo_id=_UpperCAmelCase , filename=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , )
UpperCamelCase : str = Path(_UpperCAmelCase ).parent
UpperCamelCase : Union[str, Any] = Path(_UpperCAmelCase ).name
UpperCamelCase : Optional[Any] = OnnxRuntimeModel.load_model(_UpperCAmelCase , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
return cls(model=_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def _a ( cls , _A , _A = True , _A = None , _A = None , **_A , ):
'''simple docstring'''
UpperCamelCase : Any = None
if len(str(_UpperCAmelCase ).split("""@""" ) ) == 2:
UpperCamelCase , UpperCamelCase : Dict = model_id.split("""@""" )
return cls._from_pretrained(
model_id=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , **_UpperCAmelCase , )
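
A hedged usage sketch for the class above (the repository id and input name are illustrative assumptions, not taken from the file; any ONNX checkpoint with a compatible input signature would do):

import numpy as np

model = OnnxRuntimeModel.from_pretrained("someuser/some-onnx-model")  # hypothetical repo id
outputs = model(input_ids=np.zeros((1, 8), dtype=np.int64))  # hypothetical input name
print(type(outputs))  # a list of output arrays from InferenceSession.run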
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = '''xmod'''
def __init__( self : str , _UpperCAmelCase : Optional[Any]=30_522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : int=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Dict=3_072 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Any=1E-1_2 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : int=False , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Tuple=("en_XX",) , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Optional[Any] , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
_A = pre_norm
_A = adapter_reduction_factor
_A = adapter_layer_norm
_A = adapter_reuse_layer_norm
_A = ln_before_adapter
_A = list(_UpperCAmelCase )
_A = default_language
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.task == "multiple-choice":
_A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a = logging.get_logger(__name__)
a = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=None , **_UpperCAmelCase : Optional[Any] ):
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
_A = model
_A = kwargs.get('model_save_dir' , _UpperCAmelCase )
_A = kwargs.get('latest_model_name' , _UpperCAmelCase )
def __call__( self : Dict , **_UpperCAmelCase : List[Any] ):
_A = {k: np.array(_UpperCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[Any]=None ):
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
_A = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCAmelCase , providers=[provider] , sess_options=_UpperCAmelCase )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : List[Any] ):
_A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_A = self.model_save_dir.joinpath(self.latest_model_name )
_A = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_A = self.model_save_dir.joinpath(_UpperCAmelCase )
if src_path.exists():
_A = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] , ):
if os.path.isfile(_UpperCAmelCase ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# saving model weights/files
self._save_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Optional[Union[bool, str, None]] = None , _UpperCAmelCase : Optional[Union[str, None]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional["ort.SessionOptions"] = None , **_UpperCAmelCase : Union[str, Any] , ):
_A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCAmelCase ):
_A = OnnxRuntimeModel.load_model(
os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
_A = Path(_UpperCAmelCase )
# load model from hub
else:
# download model
_A = hf_hub_download(
repo_id=_UpperCAmelCase , filename=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , )
_A = Path(_UpperCAmelCase ).parent
_A = Path(_UpperCAmelCase ).name
_A = OnnxRuntimeModel.load_model(_UpperCAmelCase , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
return cls(model=_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : Tuple , ):
_A = None
if len(str(_UpperCAmelCase ).split('@' ) ) == 2:
_A , _A = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , **_UpperCAmelCase , )
| 7 | 0 |
"""Convert YOLOS checkpoints from the original repository. URL: https://github.com/hustvl/YOLOS"""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


# We will verify our results on an image of two cats from the COCO validation set
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : str = '''speech_to_text'''
UpperCAmelCase : List[Any] = ['''past_key_values''']
UpperCAmelCase : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int , _UpperCAmelCase : Union[str, Any]=10_000 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2_048 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Tuple=2_048 , _UpperCAmelCase : str=4 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Union[str, Any]="relu" , _UpperCAmelCase : List[Any]=256 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[str]=6_000 , _UpperCAmelCase : Optional[Any]=1_024 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Any=(5, 5) , _UpperCAmelCase : int=1_024 , _UpperCAmelCase : str=80 , _UpperCAmelCase : Any=1 , **_UpperCAmelCase : Tuple , ):
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(_UpperCAmelCase )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
"""
An implementation of a Lempel-Ziv-style bit-level compressor.
"""

import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Replace curr_string in the lexicon with its two one-bit extensions."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    # When the code count crosses a power of two, every code grows by one bit.
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given bit string and return the result as a bit string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    # Flush any trailing bits by padding until a lexicon entry matches.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (Elias gamma coded) to the compressed bit string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string (only 0's and 1's) as packed bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it, and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
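
End-to-end, the script is meant to be run from the command line; a hedged usage sketch with hypothetical file names (the module name lempel_ziv is an assumption about where the code above is saved):

import os
from lempel_ziv import compress  # assumes the module above is saved as lempel_ziv.py

compress("input.bin", "compressed.lzw")  # placeholder paths
print(os.path.getsize("input.bin"), "->", os.path.getsize("compressed.lzw"))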
"""simple docstring"""
from manim import *
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
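
This lazy-import boilerplate, which recurs in the other __init__ files in this collection, defers importing heavy submodules until one of their names is first accessed. A stripped-down sketch of the idea, not the actual transformers _LazyModule implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch: resolve exported names from submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the defining submodule only now, at first use
        module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value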
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int ) -> int:
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def _snake_case ( ) -> None:
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Additional arguments for sequence-to-sequence fine-tuning, on top of the stock
    ``TrainingArguments``.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "Whether to use adafactor."})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
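

# Hedged usage sketch (an addition): these dataclass fields plug into HfArgumentParser
# exactly like the stock TrainingArguments; the CLI flags below simply mirror the field
# names above and are the only assumption here.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(Seq2SeqTrainingArguments)
    (training_args,) = parser.parse_args_into_dataclasses(
        ["--output_dir", "out", "--label_smoothing", "0.1", "--predict_with_generate"]
    )
    print(training_args.label_smoothing, training_args.predict_with_generate)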
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    """Configuration class for ``BertGeneration`` encoder/decoder models."""

    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
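

# Hedged usage sketch (an addition): instantiate the config with a couple of overrides
# and inspect the result; nothing checkpoint-specific is assumed.
if __name__ == "__main__":
    config = BertGenerationConfig(hidden_size=512, num_hidden_layers=8)
    print(config.model_type, config.hidden_size, config.num_hidden_layers)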
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two inputs: 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Check the OR gate against its full truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
    print(or_gate(1, 1))
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image generation based on the stochastic sampler from Karras et al. (2022)."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
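

# Hedged usage sketch (an addition): load a pretrained unconditional UNet plus KarrasVe
# scheduler and draw one sample. The checkpoint id is an illustrative assumption, not
# something prescribed by the pipeline above.
if __name__ == "__main__":
    from diffusers import KarrasVePipeline

    pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")  # assumed checkpoint
    image = pipe(batch_size=1, num_inference_steps=50).images[0]
    image.save("karras_ve_sample.png")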
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Checks whether ``n`` can be placed at ``grid[row][column]`` without violating
    the row, column, or 3x3 box constraints.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Finds an empty location (marked 0) so that we can assign a digit to it."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Fills the grid by backtracking: try each digit 1-9 in the first empty cell,
    recurse, and undo the assignment when the branch fails.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Prints the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
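
# Hedged note (an addition): backtracking tries up to 9 digits per empty cell, so the
# worst case is exponential in the number of blanks; the `no_solution` grid above makes
# the search exhaust every branch before `sudoku` returns None.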
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
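
    # Hedged extra check (an addition): on the 4-node cycle above the only augmenting
    # route is 0 -> 1 -> 2 -> 3, so the push-relabel result must equal the bottleneck
    # capacity min(7, 6, 8) = 6.
    assert maximum_flow == 6, f"expected 6, got {maximum_flow}"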
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    """
    NLLB tokenizer (SentencePiece based). In the default mode the language code is used
    as a prefix token and ``</s>`` as suffix; ``legacy_behaviour=True`` restores the old
    ``<tokens> </s> <lang_code>`` layout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: No prefix and suffix=[eos, src_lang_code].
        - In default mode: Prefix=[src_lang_code], suffix = [eos]
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
        - In default mode: Prefix=[tgt_lang_code], suffix = [eos]
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
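

# Hedged usage sketch (an addition): prepare English->French translation inputs with
# the released checkpoint named in the URL map above; the printed ids are not asserted.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    batch = tokenizer("Hello world", return_tensors="pt")
    print(batch["input_ids"])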
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l",
                    add_special_tokens=False,
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
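
# Hedged note (an addition): this suite is collected by pytest like any other tokenizer
# test, e.g. `python -m pytest tests/models/speecht5/test_tokenization_speecht5.py -k
# add_tokens`; the exact test path is an assumption based on the usual layout.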
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration class to store the configuration of an NLLB-MoE model."""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32",
        router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4,
        decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all",
        normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, output_router_logits=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
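

# Hedged usage sketch (an addition): a deliberately tiny MoE config for smoke tests;
# every value here is an illustrative assumption, not a released checkpoint's setup.
if __name__ == "__main__":
    tiny = NllbMoeConfig(d_model=64, encoder_layers=2, decoder_layers=2, num_experts=4, expert_capacity=8)
    print(tiny.num_experts, tiny.router_dtype, tiny.hidden_size)  # hidden_size maps to d_model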
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast MBART tokenizer (backed by HuggingFace's *tokenizers* library), based on BPE.

    The tokenization method is ``<tokens> <eos> <language code>`` for source-language documents,
    and ``<language code> <tokens> <eos>`` for target-language documents.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
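

# Hedged usage sketch (an addition): en_XX -> ro_RO inputs with the checkpoint named in
# the URL maps above; MBART appends the source language code after </s>.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    print(tok.convert_ids_to_tokens(batch["input_ids"][0].tolist())[-2:])  # ['</s>', 'en_XX']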
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version):
    """Update the version table in docs/source/_static/js/custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
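
# Hedged usage note (an addition): run after cutting a release, e.g.
#   python utils/update_custom_js.py --version 4.30.0
# The script's path in the repo is an assumption; `--version` matches the parser above.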
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MAE model."""

    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
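

# Hedged usage sketch (an addition): with the defaults above, 224/16 gives a 14x14 grid
# of 196 patches, and the 0.75 mask ratio leaves roughly 49 visible to the encoder.
if __name__ == "__main__":
    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(num_patches, int(num_patches * (1 - config.mask_ratio)))  # 196 49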
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
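

# Hedged usage sketch (an addition): list the files of a dataset repository through the
# legacy filesystem; the dataset id is an illustrative assumption and requires network.
if __name__ == "__main__":
    from huggingface_hub import HfApi

    repo_info = HfApi().dataset_info("squad")
    fs = HfFileSystem(repo_info=repo_info)
    print(fs.ls(""))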
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original VisualBERT weights into the transformers structure.
    """
    assert (
        checkpoint_path.split('/')[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'

    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1024}
        else:
            raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.')
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048, 'num_labels': 3129}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1024,
                'num_labels': 2,
            }
            model_type = 'nlvr'

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
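    # Example invocation (hypothetical local paths, shown for illustration only):
    #   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
    #       vqa_fine_tuned.th ./visualbert-vqa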
| 7 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Compute the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections.
    With `alternative_union=True` the denominator is |A| + |B| instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # build the union as a list so inputs with duplicates still work
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
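    # Worked example: |{a..e} ∩ {c,d,e,f,h,i}| = 3 and |A ∪ B| = 8, so the
    # line above prints 0.375. With the "alternative" union (|A| + |B| = 11)
    # the same sets give 3 / 11 ≈ 0.2727 instead:
    print(jaccard_similarity(set_a, set_b, alternative_union=True))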
| 461 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    # Turn off gradients for every parameter of the given module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    # Prefer CUDA, fall back to MPS, then CPU.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.'
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S')
    return timestamp
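
# Illustrative usage sketch (my addition; the Linear layer is an arbitrary
# stand-in module used only to demonstrate freeze_params):
if __name__ == "__main__":
    layer = torch.nn.Linear(4, 2)
    freeze_params(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(get_device(), get_timestamp())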
| 7 | 0 |
'''simple docstring'''
def palindromic_string(input_string):
    """
    Manacher's algorithm: returns the longest palindromic substring in O(n).

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
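    # Quick usage sketch beyond the doctests (expected output in comments):
    print(palindromic_string("abbbaba"))           # abbba
    print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg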
| 565 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.')
        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
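
# Illustrative usage sketch (the model id and `image` input are assumptions,
# not from the original file; requires network access, so left commented out):
#
# processor = CLIPSegProcessor.from_pretrained('CIDAS/clipseg-rd64-refined')
# inputs = processor(text=['a cat'], images=[image], return_tensors='pt')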
| 7 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
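
# Sketch of the PRK -> PLMS stepping pattern these tests exercise (the model
# call and `sample` are placeholders, so the lines stay commented out):
#
# scheduler = PNDMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# for t in scheduler.prk_timesteps:
#     sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
# for t in scheduler.plms_timesteps:
#     sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample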
| 589 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """
    Calculate the date of Easter for the given year using Gauss's method.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = 'will be' if year > datetime.now().year else 'was'
        print(f'Easter in {year} {tense} {gauss_easter(year)}')
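    # Sanity check worked through by hand: for 2024, metonic_cycle = 10,
    # days_to_add = 4 and days_from_phm_to_sunday = 5, so the result is
    # March 22 + 9 days = March 31, matching the actual Western Easter 2024.
    assert gauss_easter(2024) == datetime(2024, 3, 31)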
| 7 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size_divisor'))
        self.assertTrue(hasattr(image_processing, 'resample'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 8 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """
    >>> value = [1, 3, 5, 7, 9]
    >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    >>> fractional_knapsack(value, weight, 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # take items greedily by value/weight ratio
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
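
# Additional worked example (numbers chosen for illustration): with capacity
# 1.0 only the three best value/weight ratios fit entirely; item 1 (value 3,
# weight 0.7) is then taken fractionally, at 0.1 / 0.7 of its weight:
# fractional_knapsack([1, 3, 5, 7, 9], [0.9, 0.7, 0.5, 0.3, 0.1], 1.0)
#   -> (about 21.43, [0, 0.142857..., 1, 1, 1])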
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 8 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 8 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx, left_element, right_element, a, b, val):
        # push any pending lazy value down before touching this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx, left_element, right_element, a, b):
        # push any pending lazy value down before reading this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))   # max of A[4..6]  -> 7
    print(segt.query(1, 1, size, 7, 11))  # max of A[7..11] -> 14
    print(segt.query(1, 1, size, 7, 12))  # max of A[7..12] -> 15
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))  # -> 111
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 8 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = 'lilt'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 8 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression. The closed form is
    S_n = n/2 * (2*a + (n - 1)*d), since pairing the first and last terms
    gives n/2 pairs that each sum to 2*a + (n - 1)*d.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 | 1 |
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) via divide and conquer (merge sort)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion
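
# Worked example of the cross-count (arrays chosen for illustration): merging
# the sorted halves [3, 5] and [1, 2] finds 4 cross inversions, namely
# (3, 1), (3, 2), (5, 1), (5, 2):
# _count_cross_inversions([3, 5], [1, 2])  ->  ([1, 2, 3, 5], 4)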
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)


if __name__ == "__main__":
    main()
| 8 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_list)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, 'use_cache'):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, 'key_length', decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 8 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(self, sample, timestep, return_dict=True):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
| 8 |
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def _lowerCAmelCase ( __snake_case : str ) -> Tuple:
__A : List[Any] = _re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
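# Illustrative example (values assumed, not from this repo): given a body like
#     "models": [
#         "ModelA",
#     ],
#     "pipelines": ["PipelineB"],
# a call with indent_level='    ' returns one block per top-level entry, so each
# entry can be re-sorted independently further below.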
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace('_' , '' )
return _inner
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(__snake_case : List[Any] ):
return x
if key is None:
__A : Optional[Any] = noop
# Constants are all uppercase, they go first.
__A : str = [obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
__A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()]
__A : Tuple = ignore_underscore(__snake_case )
return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case )
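# Worked example (values assumed for illustration): applied to
# ["foo", "Bar", "CONST", "_private"], the sorter above returns
# ["CONST", "Bar", "foo", "_private"]: constants first, classes second,
# functions last, with underscores ignored by the sort key.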
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
# This inner function sort imports between [ ].
def _replace(__snake_case : Tuple ):
__A : List[str] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
__A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Dict = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]"
__A : List[Any] = import_statement.split('\n' )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__A : Optional[int] = 2 if lines[1].strip() == '[' else 1
__A : Any = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__A : Optional[int] = sort_objects(__snake_case , key=lambda __snake_case : x[1] )
__A : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Tuple = keys[:-1]
__A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
__A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case )
return import_statement
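# Illustrative single-line case (names assumed): the statement
#     _import_structure["models"] = ["zeta", "Alpha", "BETA"]
# is rewritten by the final branch above to
#     _import_structure["models"] = ["BETA", "Alpha", "zeta"]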
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]:
with open(__snake_case , 'r' ) as f:
__A : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__A : str = split_code_in_indented_blocks(
__snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__snake_case ) - 1 ):
        # Check if the block contains any `_import_structure` entries to sort.
__A : Tuple = main_blocks[block_idx]
__A : int = block.split('\n' )
# Get to the start of the imports.
__A : Tuple = 0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__A : Optional[int] = len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
__A : Dict = '\n'.join(block_lines[line_idx:-1] )
__A : int = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
__A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__A : Dict = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None]
__A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__A : str = 0
__A : Any = []
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
__A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(__snake_case , 'w' ) as f:
f.write('\n'.join(__snake_case ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
        raise ValueError('The given input must not be negative' )
# get the generated string sequence
__A : str = gray_code_sequence_string(__snake_case )
#
# convert them to integers
for i in range(len(__snake_case ) ):
__A : Optional[int] = int(sequence[i] , 2 )
return sequence
def _lowerCAmelCase ( __snake_case : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__A : Tuple = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__A : List[Any] = gray_code_sequence_string(bit_count - 1 )
__A : List[str] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__A : Union[str, Any] = '0' + smaller_sequence[i]
sequence.append(__snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__A : str = '1' + smaller_sequence[i]
sequence.append(__snake_case )
return sequence
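# Worked example (for illustration): bit_count=2 produces the string sequence
# ["00", "01", "11", "10"], which the top-level function converts to
# [0, 1, 3, 2]; consecutive values differ in exactly one bit.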
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
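# Worked examples (for illustration): 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14
# are perfect, so the check above returns True for them; 12 is not, since its
# proper divisors sum to 1 + 2 + 3 + 4 + 6 = 16.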
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> int:
__A : Union[str, Any] = SwinConfig(image_size=1_92 )
if "base" in model_name:
__A : Any = 6
__A : Tuple = 1_28
__A : str = (2, 2, 18, 2)
__A : Dict = (4, 8, 16, 32)
elif "large" in model_name:
__A : str = 12
__A : Tuple = 1_92
__A : str = (2, 2, 18, 2)
__A : List[Any] = (6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
__A : int = window_size
__A : Optional[int] = embed_dim
__A : List[Any] = depths
__A : Dict = num_heads
return config
def _lowerCAmelCase ( __snake_case : str ) -> Dict:
if "encoder.mask_token" in name:
__A : str = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
__A : Optional[int] = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
__A : str = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
if "attn.proj" in name:
__A : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__A : str = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__A : Any = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__A : int = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__A : str = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__A : Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
__A : str = 'layernorm.weight'
if name == "encoder.norm.bias":
__A : List[Any] = 'layernorm.bias'
if "decoder" in name:
pass
else:
__A : List[str] = 'swin.' + name
return name
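# Illustrative trace (key name assumed): "encoder.layers.0.blocks.0.attn.proj.weight"
# becomes "encoder.layers.0.blocks.0.attention.output.dense.weight" via the
# attn.proj rule, then gains the "swin." prefix from the fallback branch above.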
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Any ) -> List[str]:
for key in orig_state_dict.copy().keys():
__A : List[str] = orig_state_dict.pop(__snake_case )
if "attn_mask" in key:
pass
elif "qkv" in key:
__A : Tuple = key.split('.' )
__A : List[str] = int(key_split[2] )
__A : Dict = int(key_split[4] )
__A : Union[str, Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__A : str = val[:dim, :]
__A : Dict = val[
dim : dim * 2, :
]
__A : Any = val[-dim:, :]
else:
__A : Tuple = val[
:dim
]
__A : Any = val[
dim : dim * 2
]
__A : Union[str, Any] = val[
-dim:
]
else:
__A : Tuple = val
return orig_state_dict
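# Note (illustrative): a fused "qkv" weight of shape (3 * dim, dim) is sliced
# above into query (first dim rows), key (middle dim rows) and value (last dim
# rows), matching the separate q/k/v projections in the HF Swin implementation.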
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : str ) -> Union[str, Any]:
__A : Optional[int] = torch.load(__snake_case , map_location='cpu' )['model']
__A : str = get_swin_config(__snake_case )
__A : str = SwinForMaskedImageModeling(__snake_case )
model.eval()
__A : Tuple = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
__A : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__A : Optional[int] = ViTImageProcessor(size={'height': 1_92, 'width': 1_92} )
__A : Tuple = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
__A : List[Any] = image_processor(images=__snake_case , return_tensors='pt' )
with torch.no_grad():
__A : List[str] = model(**__snake_case ).logits
print(outputs.keys() )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__snake_case )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase__ : List[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 8 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
if k.startswith('encoder' ):
__A : Any = k.replace('.attn' , '.self_attn' )
__A : Any = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
__A : int = k.replace('norm3' , 'final_layer_norm' )
return k
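# Illustrative trace (key name assumed): "encoder.layers.0.attention.q_lin.weight"
# passes through the PATTERNS table ("attention" -> "attn", "q_lin" -> "q_proj")
# and the encoder branch (".attn" -> ".self_attn"), ending up as
# "encoder.layers.0.self_attn.q_proj.weight".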
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
__A : Optional[int] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__A : Tuple = sd.pop(__snake_case )
__A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__A : str = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int:
__A : List[str] = torch.load(__snake_case , map_location='cpu' )
__A : Tuple = model['model']
__A : str = BlenderbotConfig.from_json_file(__snake_case )
__A : int = BlenderbotForConditionalGeneration(__snake_case )
__A : List[Any] = m.model.state_dict().keys()
__A : Optional[int] = []
__A : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__A : Union[str, Any] = rename_state_dict_key(__snake_case )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__A : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__snake_case )
m.model.load_state_dict(__snake_case , strict=__snake_case )
m.half()
m.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class SCREAMING_SNAKE_CASE (a__ ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = tempfile.mkdtemp()
__A : int = 5
# Realm tok
__A : str = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__A : Union[str, Any] = os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
__A : str = os.path.join(_UpperCAmelCase , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
__A : Optional[Any] = os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = RealmConfig(num_block_records=self.num_block_records)
return config
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=_UpperCAmelCase , )
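        # Illustrative note: these byte strings form the dummy retrieval corpus;
        # the tests below index into it with block ids (e.g. id 3 is "the fourth
        # record" and id 5 is the longer record).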
return block_records
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.get_config()
__A : Optional[int] = self.get_dummy_retriever()
__A : Dict = retriever.tokenizer
__A : Optional[int] = np.array([0, 3] , dtype='long')
__A : int = tokenizer(['Test question']).input_ids
__A : List[str] = tokenizer(
['the fourth'] , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ).input_ids
__A : List[Any] = config.reader_seq_len
__A ,__A ,__A ,__A : Any = retriever(
_UpperCAmelCase , _UpperCAmelCase , answer_ids=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='np')
self.assertEqual(len(_UpperCAmelCase) , 2)
self.assertEqual(len(_UpperCAmelCase) , 2)
self.assertEqual(len(_UpperCAmelCase) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.get_config()
__A : List[Any] = self.get_dummy_retriever()
__A : Union[str, Any] = retriever.tokenizer
__A : Any = np.array([0, 3, 5] , dtype='long')
__A : Tuple = tokenizer(['Test question']).input_ids
__A : Any = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ).input_ids
__A : Union[str, Any] = config.reader_seq_len
__A ,__A ,__A ,__A : Dict = retriever(
_UpperCAmelCase , _UpperCAmelCase , answer_ids=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='np')
self.assertEqual([False, True, True] , _UpperCAmelCase)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _UpperCAmelCase)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
# Test local path
__A : str = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
self.assertEqual(retriever.block_records[0] , B'This is the first record')
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
__A : int = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
__A : int = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , B'This is the first record') | 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
#
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
__A : int = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
__A : Optional[Any] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
__A : str = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product))
return F'{a}\n{b}\n{c}'
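# Worked example (illustrative): multiplying A(x) = 1 + 2x by B(x) = 3 + 4x
# should give 3 + 10x + 8x^2, i.e. constructing the class with poly_a=[1, 2]
# and poly_b=[3, 4] yields a .product of complex coefficients approximately
# [3, 10, 8].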
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowercase__ : Optional[int] = data_utils.TransfoXLTokenizer
lowercase__ : str = data_utils.TransfoXLCorpus
lowercase__ : Union[str, Any] = data_utils
lowercase__ : int = data_utils
def _lowerCAmelCase ( __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any] ) -> Optional[Any]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__snake_case , 'rb' ) as fp:
__A : Union[str, Any] = pickle.load(__snake_case , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__A : Any = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(f'Save vocabulary to {pytorch_vocab_dump_path}' )
__A : Optional[Any] = corpus.vocab.__dict__
torch.save(__snake_case , __snake_case )
__A : List[str] = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , __snake_case )
__A : Dict = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(f'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(__snake_case , __snake_case )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__A : Any = os.path.abspath(__snake_case )
__A : List[Any] = os.path.abspath(__snake_case )
print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__A : List[str] = TransfoXLConfig()
else:
__A : str = TransfoXLConfig.from_json_file(__snake_case )
print(f'Building PyTorch model from configuration: {config}' )
__A : Any = TransfoXLLMHeadModel(__snake_case )
__A : Optional[Any] = load_tf_weights_in_transfo_xl(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
__A : Dict = os.path.join(__snake_case , __snake_case )
__A : Optional[int] = os.path.join(__snake_case , __snake_case )
print(f'Save PyTorch model to {os.path.abspath(__snake_case )}' )
torch.save(model.state_dict() , __snake_case )
print(f'Save configuration file to {os.path.abspath(__snake_case )}' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
lowercase__ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 8 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
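        # e.g. with the defaults above (image_size=[30, 30], patch_size=2,
        # num_detection_tokens=10): 15 * 15 = 225 patches, so the expected
        # sequence length is 225 + 1 + 10 = 236.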
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def _lowerCAmelCase ( ) -> int:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 | 1 |
'''simple docstring'''
import math
import sys
def _lowerCAmelCase ( __snake_case : int ) -> int:
if number != int(__snake_case ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__A : str = [-1] * (number + 1)
__A : Dict = 0
for i in range(1 , number + 1 ):
__A : int = sys.maxsize
__A : int = int(math.sqrt(__snake_case ) )
for j in range(1 , root + 1 ):
__A : str = 1 + answers[i - (j**2)]
__A : Dict = min(__snake_case , __snake_case )
__A : Union[str, Any] = answer
return answers[number]
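# Worked examples (for illustration): 12 needs three squares (4 + 4 + 4) and
# 13 needs two (4 + 9), so the function returns 3 and 2 respectively; by
# Lagrange's four-square theorem the answer never exceeds 4.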
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase__ : Dict = {
'''camembert-base''': 5_12,
}
lowercase__ : str = '''▁'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_file
__A : Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
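    # Illustrative layout (token strings assumed): a single sequence is wrapped
    # as "<s> A </s>" and a pair as "<s> A </s></s> B </s>", matching the
    # CamemBERT / RoBERTa convention encoded above.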
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
return (out_vocab_file,) | 8 | 1 |
'''simple docstring'''
import requests
lowercase__ : List[Any] = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def _lowerCAmelCase ( __snake_case : str ) -> None:
    # Fetch the list of articles in JSON format.
__A : Union[str, Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f'{i}.) {article["title"]}' )
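# Example output shape (illustrative): one numbered headline per article, e.g.
#     1.) <headline of the first article>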
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''') | 8 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
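# The cache directory above follows the hub layout (models--<org>--<name> with
# blobs/, refs/ and snapshots/), and the sha is the commit that the repo's
# "main" ref resolves to in the tests below.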
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , ):
'''simple docstring'''
__A : List[Any] = size if size is not None else {'height': 18, 'width': 18}
__A : Tuple = parent
__A : str = batch_size
__A : Optional[int] = num_channels
__A : str = image_size
__A : List[str] = min_resolution
__A : Optional[Any] = max_resolution
__A : str = do_resize
__A : Dict = size
__A : Optional[Any] = apply_ocr
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = LayoutLMvaImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'size'))
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 18})
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : int = image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase)
self.assertIsInstance(encoding.boxes , _UpperCAmelCase)
# Test batched
__A : str = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
__A : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : Any = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : int = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__A : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
__A : List[Any] = Image.open(ds[0]['file']).convert('RGB')
__A : Optional[int] = image_processing(_UpperCAmelCase , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A : str = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__A : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase)
self.assertListEqual(encoding.boxes , _UpperCAmelCase)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) | 8 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate a randomly initialized seq2seq model from a config and save it with its tokenizer."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 | 1 |
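# Hedged usage sketch for the script above. The model name, target directory
# and the d_model override are illustrative assumptions, not from the source:
# save_randomly_initialized_version("t5-small", "/tmp/t5-tiny-random", d_model=64)
# creates a tiny, randomly initialized seq2seq checkpoint that is handy for tests.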
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()} | 8 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowercase__ : Tuple = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(10_00 * (box[0] / width)),
        int(10_00 * (box[1] / height)),
        int(10_00 * (box[2] / width)),
        int(10_00 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
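# Worked example for the 0-1000 box normalization above (numbers are
# illustrative): on a 200x100 px page, a box covering the right half maps to
# normalize_box([100, 0, 200, 100], 200, 100) == [500, 0, 1000, 1000]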
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = "" , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
__A : Union[str, Any] = size if size is not None else {'height': 224, 'width': 224}
__A : Optional[Any] = get_size_dict(_UpperCAmelCase)
__A : str = do_resize
__A : List[str] = size
__A : Any = resample
__A : List[str] = do_rescale
__A : Tuple = rescale_value
__A : Tuple = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
__A : Dict = apply_ocr
__A : Optional[Any] = ocr_lang
__A : Optional[int] = tesseract_config
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Union[str, Any] = get_size_dict(_UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}')
__A : List[str] = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = do_resize if do_resize is not None else self.do_resize
__A : Optional[Any] = size if size is not None else self.size
__A : Optional[Any] = get_size_dict(_UpperCAmelCase)
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : str = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : str = image_mean if image_mean is not None else self.image_mean
__A : Optional[Any] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
__A : Optional[int] = ocr_lang if ocr_lang is not None else self.ocr_lang
__A : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
__A : Optional[int] = make_list_of_images(_UpperCAmelCase)
if not valid_images(_UpperCAmelCase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')
# All transformations expect numpy arrays.
__A : Any = [to_numpy_array(_UpperCAmelCase) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract')
__A : Optional[Any] = []
__A : Optional[Any] = []
for image in images:
__A ,__A : int = apply_tesseract(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
words_batch.append(_UpperCAmelCase)
boxes_batch.append(_UpperCAmelCase)
if do_resize:
__A : Optional[Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase) for image in images]
if do_rescale:
__A : Optional[int] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase) for image in images]
if do_normalize:
__A : Optional[int] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase) for image in images]
__A : List[str] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase) for image in images]
__A : str = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase)
if apply_ocr:
__A : List[Any] = words_batch
__A : Optional[Any] = boxes_batch
return data | 8 |
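# Hedged usage sketch for the image processor defined above (the class name
# LayoutLMv3ImageProcessor and the sample image variable are assumptions for
# illustration): OCR, resizing, rescaling and normalization happen in one call.
# processor = LayoutLMv3ImageProcessor(apply_ocr=True)
# encoding = processor(image, return_tensors="np")
# encoding.pixel_values.shape  # (1, 3, 224, 224) with the default 224x224 size
# encoding.words, encoding.boxes  # OCR words and their 0-1000 normalized boxes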
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int, int]): range to sample the target short edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h) | 8 | 1 |
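# Hedged sketch tying the helpers above together (variable names are
# illustrative): scale model boxes back to raw-image coordinates, then clamp.
# images, sizes, scales_yx = Preprocess(cfg)(raw_images)
# boxes = _scale_box(pred_boxes, scales_yx)
# _clip_box(boxes, (height, width))  # in-place clamp to the image bounds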
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f'Task {task} not supported.')

    print(f'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=5_12)
    tokenizer.save_pretrained(pytorch_dump_path)

    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
) | 8 |
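# Hypothetical invocation (the script name and all paths are placeholders):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output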
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of the undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 1 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the array from both ends to find the index of key using recursion.
    >>> search([0, 1, 2, 3], 3)
    3
    >>> search([1, 5, 6], 9)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase__ : Dict = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]:
__A : Optional[Any] = []
__A : Any = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__A : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : int = True
if "*" in mapped_key:
__A : Any = name.split(__snake_case )[0].split('.' )[-2]
__A : List[Any] = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Union[str, Any] = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__A : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : Tuple = 'weight'
else:
__A : Dict = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
__A : int = full_name.split('conv_layers.' )[-1]
__A : List[str] = name.split('.' )
__A : Optional[int] = int(items[0] )
__A : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__A : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__A : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__A : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__A : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 1 |
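# Hypothetical invocation (the script name and paths are placeholders):
# python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/WavLM-Base.pt \
#     --pytorch_dump_folder_path /path/to/output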
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor | 8 |
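# Hedged usage sketch for the processor above (model id and inputs are
# illustrative): text and images are prepared for CLIP in a single call.
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# inputs.keys()  # input_ids, attention_mask, pixel_values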
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__A : Dict = sample_size
# time
if time_embedding_type == "fourier":
__A : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase)
__A : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__A : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase)
__A : List[str] = block_out_channels[0]
if use_timestep_embedding:
__A : Optional[Any] = block_out_channels[0] * 4
__A : Optional[int] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
__A : Dict = nn.ModuleList([])
__A : Dict = None
__A : Tuple = nn.ModuleList([])
__A : Tuple = None
# down
__A : Any = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase):
__A : Tuple = output_channel
__A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__A : List[str] = i == len(_UpperCAmelCase) - 1
__A : int = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase)
# mid
__A : str = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
__A : Optional[int] = list(reversed(_UpperCAmelCase))
__A : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
__A : str = out_channels
else:
__A : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase):
__A : Optional[Any] = output_channel
__A : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels
)
__A : Dict = i == len(_UpperCAmelCase) - 1
__A : str = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase)
__A : Optional[int] = output_channel
# out
__A : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
__A : Optional[Any] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
'''simple docstring'''
__A : Any = timestep
if not torch.is_tensor(_UpperCAmelCase):
__A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0:
__A : Any = timesteps[None].to(sample.device)
__A : List[Any] = self.time_proj(_UpperCAmelCase)
if self.config.use_timestep_embedding:
__A : Dict = self.time_mlp(_UpperCAmelCase)
else:
__A : Dict = timestep_embed[..., None]
__A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
__A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
__A : int = ()
for downsample_block in self.down_blocks:
__A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
__A : Any = down_block_res_samples[-1:]
__A : Optional[int] = down_block_res_samples[:-1]
__A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase)
# 5. post-process
if self.out_block:
__A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_UpperCAmelCase) | 8 | 1 |
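# Hedged shape sketch for the 1D UNet above (all sizes are illustrative
# assumptions): the model maps a (batch, channels, length) sample plus a
# timestep to an output sample of the same shape.
# model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
# out = model(torch.randn(1, 2, 65536), timestep=10).sample  # (1, 2, 65536)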
'''simple docstring'''
def heaps(arr: list) -> list:
    """
    Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr)) | 8 |
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
def perfect(number: int) -> bool:
    """
    Check whether a number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3.
    >>> perfect(28)
    True
    """
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.") | 8 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM']
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
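# Example invocation using the repo id mentioned in the help text above
# (the script name and output path are placeholders):
# python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path /path/to/output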
'''simple docstring'''
def longest_distance(graph):
    """Print the longest distance in a DAG, found with a Kahn-style topological traversal."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph) | 8 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        """Serializes this instance, replacing nested `GenerationConfig` objects with plain dictionaries."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d | 8 | 1 |
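# Hedged usage sketch (all values are illustrative): enable generate()-based
# evaluation so ROUGE/BLEU-style metrics can be computed during training.
# args = Seq2SeqTrainingArguments(
#     output_dir="out",
#     predict_with_generate=True,
#     generation_max_length=128,
#     generation_num_beams=4,
# )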
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: by default, logs are only
    propagated from the main process.
    """

    @staticmethod
    def _should_log(main_process_only):
        # Check if the log should be performed on this process
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.'
            )
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Returns a `logging.Logger` for `name` that can handle multiprocessing."""
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {}) | 8 |
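# Hedged usage sketch for get_logger above (level and messages illustrative):
# logger = get_logger(__name__, log_level="INFO")
# logger.info("logged once, from the main process", main_process_only=True)
# logger.debug("logged by every process, rank by rank", main_process_only=False, in_order=True)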
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs) | 8 | 1 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM']
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path to the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
'''simple docstring'''
import math
import sys
def _lowerCAmelCase ( __snake_case : int ) -> int:
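# Bottom-up DP for the minimum number of perfect squares that sum to `number`:
# answers[i] = 1 + min(answers[i - j*j]) over all j with j*j <= i.
# e.g. 12 -> 3, since 12 = 4 + 4 + 4.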
if number != int(__snake_case ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__A : str = [-1] * (number + 1)
__A : Dict = 0
for i in range(1 , number + 1 ):
__A : int = sys.maxsize
__A : int = int(math.sqrt(__snake_case ) )
for j in range(1 , root + 1 ):
__A : str = 1 + answers[i - (j**2)]
__A : Dict = min(__snake_case , __snake_case )
__A : Union[str, Any] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowercase__ : Union[str, Any] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowercase__ : int = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowercase__ : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
lowercase__ : Optional[Any] = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
lowercase__ : List[str] = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
lowercase__ : Optional[Any] = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
lowercase__ : Dict = tf.keras.preprocessing.image.img_to_array(test_image)
lowercase__ : Dict = np.expand_dims(test_image, axis=0)
lowercase__ : Tuple = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
lowercase__ : Tuple = '''Normal'''
if result[0][0] == 1:
lowercase__ : Optional[Any] = '''Abnormality detected'''
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __snake_case : list[int] , __snake_case : list[int] , __snake_case : int ) -> tuple[float, list[float]]:
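# Greedy fractional knapsack: sort items by value/weight ratio (descending), take
# whole items while they fit, then a fraction of the first item that does not.
# e.g. value=[60, 100, 120], weight=[10, 20, 30], capacity=50 -> (240.0, [1, 1, 2/3]).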
__A : int = list(range(len(__snake_case ) ) )
__A : Optional[Any] = [v / w for v, w in zip(__snake_case , __snake_case )]
index.sort(key=lambda __snake_case : ratio[i] , reverse=__snake_case )
__A : float = 0
__A : list[float] = [0] * len(__snake_case )
for i in index:
if weight[i] <= capacity:
__A : Optional[int] = 1
max_value += value[i]
capacity -= weight[i]
else:
__A : List[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
lowerCAmelCase = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
lowerCAmelCase = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
lowerCAmelCase = field(
default=1_0000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
lowerCAmelCase = field(default=2E-4 , metadata={'''help''': '''Learning rate for training.'''} )
lowerCAmelCase = field(default='''cosine''' , metadata={'''help''': '''Learning rate schedule type.'''} )
lowerCAmelCase = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
lowerCAmelCase = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
lowerCAmelCase = field(default=5_0000 , metadata={'''help''': '''Maximum number of training steps.'''} )
lowerCAmelCase = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
lowerCAmelCase = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Training seed.'''} )
lowerCAmelCase = field(
default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
lowerCAmelCase = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
lowerCAmelCase = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
lowerCAmelCase = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
lowerCAmelCase = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
lowerCAmelCase = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
lowerCAmelCase = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
lowerCAmelCase = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
lowerCAmelCase = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
lowerCAmelCase = field(
default='''eval_results.json''' , metadata={'''help''': '''File in which to save the evaluation results.'''} )
lowerCAmelCase = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
lowerCAmelCase = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
lowerCAmelCase = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
lowerCAmelCase = field(
default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
lowerCAmelCase = field(
default=10_0000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
lowerCAmelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
lowerCAmelCase = field(
default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
lowerCAmelCase = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
lowerCAmelCase = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
lowerCAmelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
lowerCAmelCase = field(default=20_0000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
lowerCAmelCase = field(
default=3_2768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
lowerCAmelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
lowerCAmelCase = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
lowerCAmelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
'''simple docstring'''
from __future__ import annotations
import math
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = size
# approximate the overall size of segment tree with given value
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
# create array to store lazy update
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
__A : str = [0 for i in range(0 , 4 * size)] # flag for lazy update
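# Max segment tree with lazy propagation: range-assignment updates are parked in
# `self.lazy` and only pushed down to the children when a node is next visited.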
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2 + 1
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if left_element == right_element:
__A : List[Any] = a[left_element - 1]
else:
__A : List[str] = (left_element + right_element) // 2
self.build(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.build(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase)
__A : Any = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Optional[Any] = self.lazy[idx]
__A : Optional[Any] = False
if left_element != right_element:
__A : List[Any] = self.lazy[idx]
__A : Dict = self.lazy[idx]
__A : Tuple = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__A : Optional[int] = val
if left_element != right_element:
__A : Tuple = val
__A : Any = val
__A : Tuple = True
__A : Union[str, Any] = True
return True
__A : str = (left_element + right_element) // 2
self.update(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.update(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : int = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
return True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Union[str, Any] = self.lazy[idx]
__A : List[str] = False
if left_element != right_element:
__A : Union[str, Any] = self.lazy[idx]
__A : Optional[int] = self.lazy[idx]
__A : str = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__A : Any = (left_element + right_element) // 2
__A : int = self.query(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = self.query(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return max(_UpperCAmelCase , _UpperCAmelCase)
def __str__( self):
'''simple docstring'''
return str([self.query(1 , 1 , self.size , _UpperCAmelCase , _UpperCAmelCase) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
lowercase__ : Union[str, Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowercase__ : str = 15
lowercase__ : List[Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt)
'''simple docstring'''
import numpy as np
import datasets
lowercase__ : Optional[int] = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
lowercase__ : List[str] = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
lowercase__ : Tuple = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
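# For reference: the squared Mahalanobis distance of a point x to a distribution
# with mean mu and covariance S is D^2(x) = (x - mu)^T S^{-1} (x - mu). The
# metric below evaluates exactly this for every row of X, falling back to the
# pseudo-inverse when the covariance matrix is singular.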
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float' , id='sequence') , id='X'),
}) , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = np.array(_UpperCAmelCase)
__A : Union[str, Any] = np.array(_UpperCAmelCase)
# Assert that arrays are 2D
if len(X.shape) != 2:
raise ValueError('Expected `X` to be a 2D vector')
if len(reference_distribution.shape) != 2:
raise ValueError('Expected `reference_distribution` to be a 2D vector')
if reference_distribution.shape[0] < 2:
raise ValueError(
'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension')
# Get mahalanobis distance for each prediction
__A : Union[str, Any] = X - np.mean(_UpperCAmelCase)
__A : Tuple = np.cov(reference_distribution.T)
try:
__A : str = np.linalg.inv(_UpperCAmelCase)
except np.linalg.LinAlgError:
__A : List[Any] = np.linalg.pinv(_UpperCAmelCase)
__A : int = np.dot(_UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = np.dot(_UpperCAmelCase , X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist} | 8 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int , __snake_case : int , __snake_case : int ) -> float:
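# Sum of an arithmetic series: S_n = n/2 * (2a + (n - 1)d), with first term a and
# common difference d; e.g. sum_of_series(1, 1, 10) = 10/2 * (2 + 9) = 55.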
__A : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _lowerCAmelCase ( ) -> Union[str, Any]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase__ : List[Any] = '''\
Text data.
Second line of data.'''
lowercase__ : List[Any] = '''file'''
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Any:
__A : List[Any] = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
__A : Any = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
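# Session-scoped fixture: writes FILE_CONTENT once as a .zstd archive so the
# compression round-trip tests below can reuse it.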
@pytest.fixture
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
with open(os.path.join(tmpfs.local_root_dir , __snake_case ) , 'w' ) as f:
f.write(__snake_case )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : List[str] ) -> int:
__A : Union[str, Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
__A : int = input_paths[compression_format]
__A : Optional[Any] = tmp_path / 'cache'
__A : List[str] = DownloadConfig(cache_dir=__snake_case , extract_compressed_file=__snake_case )
__A : List[str] = cached_path(__snake_case , download_config=__snake_case )
with open(__snake_case ) as f:
__A : Tuple = f.read()
with open(__snake_case ) as f:
__A : Union[str, Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Tuple ) -> int:
__A : List[Any] = 'custom_cache'
__A : Tuple = 'custom_extracted_dir'
__A : Optional[Any] = tmp_path / 'custom_extracted_path'
if default_extracted:
__A : List[Any] = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , __snake_case )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__snake_case ) )
__A : Optional[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__A : List[Any] = xz_file
__A : Tuple = (
DownloadConfig(extract_compressed_file=__snake_case )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__snake_case )
)
__A : Tuple = cached_path(__snake_case , download_config=__snake_case )
assert Path(__snake_case ).parent.parts[-2:] == expected
def _lowerCAmelCase ( __snake_case : Any ) -> Tuple:
# absolute path
__A : Any = str(Path(__snake_case ).resolve() )
assert cached_path(__snake_case ) == text_file
# relative path
__A : List[Any] = str(Path(__snake_case ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__snake_case ) == text_file
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Union[str, Any]:
# absolute path
__A : int = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(__snake_case ):
cached_path(__snake_case )
# relative path
__A : Union[str, Any] = './__missing_file__.txt'
with pytest.raises(__snake_case ):
cached_path(__snake_case )
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Tuple:
__A : List[Any] = get_from_cache(f'tmp://{tmpfs_file}' )
with open(__snake_case ) as f:
__A : Dict = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _lowerCAmelCase ( ) -> Any:
with pytest.raises(__snake_case ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _lowerCAmelCase ( __snake_case : Any ) -> Tuple:
__A : Dict = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__snake_case ):
http_get('https://huggingface.co' , temp_file=__snake_case )
with pytest.raises(__snake_case ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _lowerCAmelCase ( __snake_case : List[Any] ) -> str:
__A : int = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__snake_case ):
ftp_get('ftp://huggingface.co' , temp_file=__snake_case )
with pytest.raises(__snake_case ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Any:
__A : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__snake_case ):
fsspec_get('s3://huggingface.co' , temp_file=__snake_case )
with pytest.raises(__snake_case ):
fsspec_head('s3://huggingface.co' )
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
__A ,__A ,__A ,__A ,__A ,__A ,__A : Union[str, Any] = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
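# ConvBERT's mixed attention replaces half of the heads with span-based dynamic
# convolution (head_ratio=2), hence the `/ 2` in the expected attention shape.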
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[int] = logging.get_logger()
def _lowerCAmelCase ( __snake_case : int , __snake_case : str , __snake_case : LevitConfig , __snake_case : Path , __snake_case : bool = True ) -> int:
print(f'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__A : Tuple = timm.create_model('levit_128s' , pretrained=__snake_case )
else:
__A : Any = timm.create_model('levit_128' , pretrained=__snake_case )
if hidden_sizes == 1_92:
__A : Union[str, Any] = timm.create_model('levit_192' , pretrained=__snake_case )
if hidden_sizes == 2_56:
__A : List[Any] = timm.create_model('levit_256' , pretrained=__snake_case )
if hidden_sizes == 3_84:
__A : List[str] = timm.create_model('levit_384' , pretrained=__snake_case )
from_model.eval()
__A : Tuple = LevitForImageClassificationWithTeacher(__snake_case ).eval()
__A : List[Any] = OrderedDict()
__A : int = from_model.state_dict()
__A : List[Any] = list(from_model.state_dict().keys() )
__A : Optional[int] = list(our_model.state_dict().keys() )
print(len(__snake_case ) , len(__snake_case ) )
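# The timm and HF state dicts line up one-to-one here, so weights are copied
# positionally rather than by (renamed) key.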
for i in range(len(__snake_case ) ):
__A : Optional[int] = weights[og_keys[i]]
our_model.load_state_dict(__snake_case )
__A : Tuple = torch.randn((2, 3, 2_24, 2_24) )
__A : Optional[int] = from_model(__snake_case )
__A : List[str] = our_model(__snake_case ).logits
assert torch.allclose(__snake_case , __snake_case ), "The model logits don't match the original one."
__A : List[Any] = name
print(__snake_case )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__A : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'Pushed {checkpoint_name}' )
def _lowerCAmelCase ( __snake_case : Path , __snake_case : str = None , __snake_case : bool = True ) -> List[str]:
__A : Tuple = 'imagenet-1k-id2label.json'
__A : List[str] = 10_00
__A : Any = (1, num_labels)
__A : Dict = 'huggingface/label-files'
__A : Optional[Any] = num_labels
__A : str = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) )
__A : Union[str, Any] = {int(__snake_case ): v for k, v in idalabel.items()}
__A : Tuple = idalabel
__A : Dict = {v: k for k, v in idalabel.items()}
__A : List[Any] = partial(__snake_case , num_labels=__snake_case , idalabel=__snake_case , labelaid=__snake_case )
__A : Optional[Any] = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__A : Optional[Any] = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __snake_case , names_to_config[model_name] , __snake_case , __snake_case )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __snake_case , __snake_case , __snake_case , __snake_case )
return config, expected_shape
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert; it must be one of the supported Levit* architectures.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
lowercase__ : Optional[Any] = parser.parse_args()
lowercase__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def _lowerCAmelCase ( __snake_case : str ) -> Tuple:
__A : List[Any] = _re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace('_' , '' )
return _inner
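# Sort-key helper: lowercases and strips underscores so that e.g. `_FooBar` and
# `foo_bar` compare as equal when ordering import names.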
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(__snake_case : List[Any] ):
return x
if key is None:
__A : Optional[Any] = noop
# Constants are all uppercase, they go first.
__A : str = [obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
__A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()]
__A : Tuple = ignore_underscore(__snake_case )
return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case )
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
# This inner function sort imports between [ ].
def _replace(__snake_case : Tuple ):
__A : List[str] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
__A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Dict = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]"
__A : List[Any] = import_statement.split('\n' )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__A : Optional[int] = 2 if lines[1].strip() == '[' else 1
__A : Any = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__A : Optional[int] = sort_objects(__snake_case , key=lambda __snake_case : x[1] )
__A : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Tuple = keys[:-1]
__A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
__A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case )
return import_statement
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]:
with open(__snake_case , 'r' ) as f:
__A : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__A : str = split_code_in_indented_blocks(
__snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__A : Tuple = main_blocks[block_idx]
__A : int = block.split('\n' )
# Get to the start of the imports.
__A : Tuple = 0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__A : Optional[int] = len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
__A : Dict = '\n'.join(block_lines[line_idx:-1] )
__A : int = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
__A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
__A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__A : Dict = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None]
__A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__A : str = 0
__A : Any = []
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
__A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(__snake_case , 'w' ) as f:
f.write('\n'.join(__snake_case ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger('''transformers.models.encodec''')
lowercase__ : Dict = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowercase__ : Dict = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowercase__ : Dict = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowercase__ : Optional[Any] = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowercase__ : Any = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowercase__ : Tuple = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ : Tuple = []
lowercase__ : Dict = []
def _lowerCAmelCase ( __snake_case : str , __snake_case : Any , __snake_case : int , __snake_case : Tuple , __snake_case : List[str] ) -> Union[str, Any]:
for attribute in key.split('.' ):
__A : str = getattr(__snake_case , __snake_case )
if weight_type is not None:
__A : Optional[int] = getattr(__snake_case , __snake_case ).shape
else:
__A : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__A : List[str] = value
elif weight_type == "weight_g":
__A : Dict = value
elif weight_type == "weight_v":
__A : Tuple = value
elif weight_type == "bias":
__A : Dict = value
elif weight_type == "running_mean":
__A : Any = value
elif weight_type == "running_var":
__A : str = value
elif weight_type == "num_batches_tracked":
__A : str = value
elif weight_type == "weight_ih_l0":
__A : Dict = value
elif weight_type == "weight_hh_l0":
__A : List[str] = value
elif weight_type == "bias_ih_l0":
__A : Dict = value
elif weight_type == "bias_hh_l0":
__A : Any = value
elif weight_type == "weight_ih_l1":
__A : Optional[Any] = value
elif weight_type == "weight_hh_l1":
__A : int = value
elif weight_type == "bias_ih_l1":
__A : Optional[int] = value
elif weight_type == "bias_hh_l1":
__A : Tuple = value
else:
__A : Union[str, Any] = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Tuple ) -> List[Any]:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__A ,__A : str = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
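# Illustrative pattern semantics, inferred from the branches above (the real
# ignore lists are defined per checkpoint, so these keys are made up):
#   "decoder.*"        ignores every key that starts with "decoder."
#   "encoder.*.lstm"   ignores keys containing both "encoder" and "lstm"
#   "final_proj"       ignores any key containing that literal substring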
def _lowerCAmelCase ( __snake_case : int , __snake_case : Tuple , __snake_case : Any ) -> str:
__A : Optional[Any] = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
__A : Tuple = MAPPING_24K
elif model_name == "encodec_48khz":
__A : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(__snake_case , __snake_case ):
logger.info(f'{name} was ignored' )
continue
__A : Optional[Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
__A ,__A : Dict = key.split('.*.' )
if prefix in name and suffix in name:
__A : Any = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
__A : List[str] = True
if "*" in mapped_key:
__A : Optional[int] = name.split(__snake_case )[0].split('.' )[-2]
__A : str = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
__A : str = 'weight_g'
elif "weight_v" in name:
__A : int = 'weight_v'
elif "weight_ih_l0" in name:
__A : int = 'weight_ih_l0'
elif "weight_hh_l0" in name:
__A : List[Any] = 'weight_hh_l0'
elif "bias_ih_l0" in name:
__A : Optional[Any] = 'bias_ih_l0'
elif "bias_hh_l0" in name:
__A : int = 'bias_hh_l0'
elif "weight_ih_l1" in name:
__A : List[Any] = 'weight_ih_l1'
elif "weight_hh_l1" in name:
__A : Union[str, Any] = 'weight_hh_l1'
elif "bias_ih_l1" in name:
__A : Dict = 'bias_ih_l1'
elif "bias_hh_l1" in name:
__A : Union[str, Any] = 'bias_hh_l1'
elif "bias" in name:
__A : List[str] = 'bias'
elif "weight" in name:
__A : List[Any] = 'weight'
elif "running_mean" in name:
__A : List[Any] = 'running_mean'
elif "running_var" in name:
__A : int = 'running_var'
elif "num_batches_tracked" in name:
__A : Optional[int] = 'num_batches_tracked'
else:
__A : List[Any] = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str=None , __snake_case : str=None , ) -> Optional[Any]:
if config_path is not None:
__A : Dict = EncodecConfig.from_pretrained(__snake_case )
else:
__A : List[Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
__A : Union[str, Any] = [8, 5, 4, 4]
__A : Optional[int] = [2.2]
__A : Optional[int] = 64
__A : List[str] = 3_20_00
__A : Tuple = 20_48
__A : Tuple = False
__A : Optional[Any] = False
__A : Optional[int] = False
elif model_name == "encodec_48khz":
__A : Optional[int] = [8, 5, 4, 2]
__A : Any = [3.0, 6.0, 12.0, 24.0]
__A : Dict = 4_80_00
__A : List[str] = 2
__A : Optional[int] = False
__A : Any = 'time_group_norm'
__A : Optional[int] = True
__A : int = 1.0
__A : int = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
__A : Union[str, Any] = EncodecModel(__snake_case )
__A : int = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__snake_case )
__A : List[Any] = torch.load(__snake_case )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
__A : Dict = original_checkpoint['best_state']
recursively_load_weights(__snake_case , __snake_case , __snake_case )
model.save_pretrained(__snake_case )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(__snake_case )
model.push_to_hub(__snake_case )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowercase__ : str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
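# Worked examples: 6 is perfect (1 + 2 + 3 == 6) and so is 28
# (1 + 2 + 4 + 7 + 14 == 28), while 12 is not (1 + 2 + 3 + 4 + 6 == 16).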
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 1 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE :
def __init__( self):
'''simple docstring'''
__A : dict[str, TrieNode] = {} # Mapping from char to TrieNode
__A : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
for word in words:
self.insert(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = self
for char in word:
if char not in curr.nodes:
__A : Optional[Any] = TrieNode()
__A : Any = curr.nodes[char]
__A : str = True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Union[str, Any] = self
for char in word:
if char not in curr.nodes:
return False
__A : Optional[int] = curr.nodes[char]
return curr.is_leaf
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
def _delete(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) -> bool:
if index == len(_UpperCAmelCase):
# If word does not exist
if not curr.is_leaf:
return False
__A : Any = False
return len(curr.nodes) == 0
__A : Dict = word[index]
__A : Optional[Any] = curr.nodes.get(_UpperCAmelCase)
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
__A : int = _delete(_UpperCAmelCase , _UpperCAmelCase , index + 1)
if delete_curr:
del curr.nodes[char]
                return len(curr.nodes) == 0 and not curr.is_leaf
return delete_curr
_delete(self , _UpperCAmelCase , 0)
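    # Note on _delete above: it returns True only when the visited node has no
    # remaining children and no longer marks the end of a word, so each parent
    # can prune dead branches from its mapping on the way back up the recursion.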
def _lowerCAmelCase ( __snake_case : TrieNode , __snake_case : str ) -> None:
if node.is_leaf:
print(__snake_case , end=' ' )
for key, value in node.nodes.items():
print_words(__snake_case , word + key )
def _lowerCAmelCase ( ) -> bool:
__A : Tuple = 'banana bananas bandana band apple all beast'.split()
__A : Optional[Any] = TrieNode()
root.insert_many(__snake_case )
# print_words(root, "")
assert all(root.find(__snake_case ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def _lowerCAmelCase ( __snake_case : str , __snake_case : bool ) -> None:
print(str(__snake_case ) , 'works!' if passes else 'doesn\'t work :(' )
def _lowerCAmelCase ( ) -> None:
assert test_trie()
def _lowerCAmelCase ( ) -> None:
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
    main()
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
if k.startswith('encoder' ):
__A : Any = k.replace('.attn' , '.self_attn' )
__A : Any = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
__A : int = k.replace('norm3' , 'final_layer_norm' )
return k
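# Illustrative example of the rewrite above (the key itself is made up):
#   "encoder.layers.0.attention.q_lin.weight"
#     -> "encoder.layers.0.self_attn.q_proj.weight"
# PATTERNS handles the module renames; the encoder/decoder branches then map
# ParlAI's norm1/norm2/norm3 onto the HF layer-norm attribute names.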
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
__A : Optional[int] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__A : Tuple = sd.pop(__snake_case )
__A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__A : str = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int:
__A : List[str] = torch.load(__snake_case , map_location='cpu' )
__A : Tuple = model['model']
__A : str = BlenderbotConfig.from_json_file(__snake_case )
__A : int = BlenderbotForConditionalGeneration(__snake_case )
__A : List[Any] = m.model.state_dict().keys()
__A : Optional[int] = []
__A : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__A : Union[str, Any] = rename_state_dict_key(__snake_case )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__A : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__snake_case )
m.model.load_state_dict(__snake_case , strict=__snake_case )
m.half()
m.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowercase__ : Optional[Any] = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[Any] = parent
__A : List[Any] = 13
__A : str = 7
__A : Optional[Any] = True
__A : List[str] = True
__A : Union[str, Any] = True
__A : Dict = True
__A : int = 99
__A : Optional[Any] = 32
__A : Tuple = 2
__A : str = 4
__A : str = 37
__A : List[str] = 'gelu'
__A : str = 0.1
__A : List[Any] = 0.1
__A : Optional[Any] = 512
__A : Any = 16
__A : Optional[int] = 2
__A : Dict = 0.02
__A : Union[str, Any] = 3
__A : List[Any] = 4
__A : Dict = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : int = None
if self.use_token_type_ids:
__A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : str = None
__A : str = None
__A : Any = None
if self.use_labels:
__A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : int = ids_tensor([self.batch_size] , self.num_choices)
__A : Optional[int] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = TFRoFormerModel(config=_UpperCAmelCase)
__A : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : List[Any] = [input_ids, input_mask]
__A : str = model(_UpperCAmelCase)
__A : Tuple = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = True
__A : List[Any] = TFRoFormerForCausalLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Tuple = model(_UpperCAmelCase)['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape) , [self.batch_size, self.seq_length, self.vocab_size])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = TFRoFormerForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = self.num_labels
__A : Tuple = TFRoFormerForSequenceClassification(config=_UpperCAmelCase)
__A : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = self.num_choices
__A : List[str] = TFRoFormerForMultipleChoice(config=_UpperCAmelCase)
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Dict = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Any = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : List[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = self.num_labels
__A : List[Any] = TFRoFormerForTokenClassification(config=_UpperCAmelCase)
__A : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFRoFormerForQuestionAnswering(config=_UpperCAmelCase)
__A : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : List[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.prepare_config_and_inputs()
        __A ,__A ,__A ,__A ,__A ,__A ,__A : Optional[Any] = config_and_inputs
__A : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = TFRoFormerModelTester(self)
__A : Tuple = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
self.assertIsNotNone(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Tuple = model(_UpperCAmelCase)[0]
# TODO Replace vocab size
__A : Tuple = 5_0000
__A : Any = [1, 6, vocab_size]
self.assertEqual(output.shape , _UpperCAmelCase)
print(output[:, :3, :3])
# TODO Replace values below with what was printed above.
__A : int = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = 1E-4
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = tf.constant([[4, 10]])
__A : Optional[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6)
__A : List[str] = emba(input_ids.shape)
__A : int = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , atol=self.tolerance)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
])
__A : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512)
emba([2, 16, 512])
__A : Tuple = emba.weight[:3, :5]
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , atol=self.tolerance)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = 1E-4
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        __A : Any = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32) , shape=(2, 12, 16, 64)) / 100
        __A : Dict = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32) , shape=(2, 12, 16, 64)) / 100
__A : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64)
__A : str = embed_positions([2, 16, 768])[None, None, :, :]
__A ,__A : Tuple = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Dict = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
])
__A : Optional[int] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
])
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , _UpperCAmelCase , atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , _UpperCAmelCase , atol=self.tolerance)
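        # The scheme exercised above rotates each two-dimensional feature pair of
        # the query/key by a position-dependent angle, so position 0 is left
        # unchanged; and since the rotation is linear, feeding the negated query
        # tensor as the key yields exactly the negated expectations checked here.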
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
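        # i.e. root = exp(2*pi*1j / c_max_length), the principal
        # c_max_length-th root of unity used as the twiddle factor below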
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
#
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
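    # Reading of the loop above (an interpretation): each pass halves next_ncol
    # and merges rows i and i + next_ncol with powers of the root of unity, a
    # Cooley-Tukey style butterfly, so dft[0] ends up holding the transform.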
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
        __A : int = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        __A : Optional[Any] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        __A : str = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product))
return F'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
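# Worked example for the multiplication implemented above:
# A(x) = 1 + 2x + 3x^2 times B(x) = 3 + 4x gives 3 + 10x + 17x^2 + 12x^3,
# since 1*3 = 3, 1*4 + 2*3 = 10, 2*4 + 3*3 = 17 and 3*4 = 12.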
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[int] = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : List[str]=False ) -> Union[str, Any]:
__A : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__A : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[Any]=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
__A : Union[str, Any] = ''
else:
__A : List[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__A : Optional[int] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
__A : str = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__A : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
__A : Union[str, Any] = in_proj_bias[: config.hidden_size]
__A : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__A : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__A : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
__A : Dict = in_proj_bias[-config.hidden_size :]
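    # The timm checkpoint fuses q, k and v into a single projection of shape
    # (3 * hidden_size, hidden_size); the three hidden_size-row slices above are
    # the query, key and value weights (and matching bias thirds) in that order.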
def _lowerCAmelCase ( __snake_case : List[Any] ) -> List[Any]:
__A : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Dict:
__A : int = dct.pop(__snake_case )
__A : Optional[Any] = val
def _lowerCAmelCase ( ) -> str:
__A : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__A : List[Any] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : Tuple ) -> List[str]:
__A : str = ViTConfig()
__A : Optional[int] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
__A : Tuple = True
__A : Union[str, Any] = int(vit_name[-12:-10] )
__A : Tuple = int(vit_name[-9:-6] )
else:
__A : Optional[int] = 10_00
__A : Tuple = 'huggingface/label-files'
__A : Dict = 'imagenet-1k-id2label.json'
__A : Tuple = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) )
__A : List[str] = {int(__snake_case ): v for k, v in idalabel.items()}
__A : str = idalabel
__A : str = {v: k for k, v in idalabel.items()}
__A : List[Any] = int(vit_name[-6:-4] )
__A : Any = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
__A : Any = 1_92
__A : Tuple = 7_68
__A : Any = 12
__A : Dict = 3
elif vit_name[9:].startswith('small' ):
__A : Any = 3_84
__A : List[Any] = 15_36
__A : int = 12
__A : Union[str, Any] = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
__A : str = 7_68
__A : List[str] = 23_04
__A : Any = 8
__A : str = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
__A : Any = 10_24
__A : List[Any] = 40_96
__A : Optional[int] = 24
__A : str = 16
elif vit_name[4:].startswith('huge' ):
__A : str = 12_80
__A : Any = 51_20
__A : Union[str, Any] = 32
__A : List[Any] = 16
# load original model from timm
__A : Optional[Any] = timm.create_model(__snake_case , pretrained=__snake_case )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__A : str = timm_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
__A : Tuple = create_rename_keys(__snake_case , __snake_case )
for src, dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
read_in_q_k_v(__snake_case , __snake_case , __snake_case )
# load HuggingFace model
if vit_name[-5:] == "in21k":
__A : str = ViTModel(__snake_case ).eval()
else:
__A : Optional[int] = ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
__A : Optional[int] = DeiTImageProcessor(size=config.image_size )
else:
__A : Optional[Any] = ViTImageProcessor(size=config.image_size )
__A : List[str] = image_processor(images=prepare_img() , return_tensors='pt' )
__A : Tuple = encoding['pixel_values']
__A : Any = model(__snake_case )
if base_model:
__A : str = timm_model.forward_features(__snake_case )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__snake_case , outputs.pooler_output , atol=1e-3 )
else:
__A : int = timm_model(__snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case , outputs.logits , atol=1e-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__snake_case )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowercase__ : Tuple = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def _lowerCAmelCase ( ) -> int:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
        self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
__A : List[Any] = value_function
__A : str = unet
__A : Tuple = scheduler
__A : List[Any] = env
__A : List[str] = env.get_dataset()
__A : List[str] = {}
for key in self.data.keys():
try:
__A : Optional[int] = self.data[key].mean()
except: # noqa: E722
pass
__A : str = {}
for key in self.data.keys():
try:
__A : Optional[int] = self.data[key].std()
except: # noqa: E722
pass
__A : Optional[Any] = env.observation_space.shape[0]
__A : List[Any] = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if type(_UpperCAmelCase) is dict:
return {k: self.to_torch(_UpperCAmelCase) for k, v in x_in.items()}
elif torch.is_tensor(_UpperCAmelCase):
return x_in.to(self.unet.device)
return torch.tensor(_UpperCAmelCase , device=self.unet.device)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
for key, val in cond.items():
__A : List[Any] = val.clone()
return x_in
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = x.shape[0]
__A : Union[str, Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps):
# create batch of timesteps to pass into model
__A : str = torch.full((batch_size,) , _UpperCAmelCase , device=self.unet.device , dtype=torch.long)
for _ in range(_UpperCAmelCase):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
__A : Optional[int] = self.value_function(x.permute(0 , 2 , 1) , _UpperCAmelCase).sample
__A : int = torch.autograd.grad([y.sum()] , [x])[0]
__A : Optional[Any] = self.scheduler._get_variance(_UpperCAmelCase)
__A : int = torch.exp(0.5 * posterior_variance)
__A : int = model_std * grad
__A : Optional[Any] = 0
__A : List[str] = x.detach()
__A : Tuple = x + scale * grad
__A : Optional[Any] = self.reset_xa(_UpperCAmelCase , _UpperCAmelCase , self.action_dim)
__A : int = self.unet(x.permute(0 , 2 , 1) , _UpperCAmelCase).sample.permute(0 , 2 , 1)
# TODO: verify deprecation of this kwarg
__A : int = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , predict_epsilon=_UpperCAmelCase)['prev_sample']
# apply conditions to the trajectory (set the initial state)
__A : Optional[int] = self.reset_xa(_UpperCAmelCase , _UpperCAmelCase , self.action_dim)
__A : Dict = self.to_torch(_UpperCAmelCase)
return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, 'observations')
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x = randn_tensor(shape, device=self.unet.device)
        x = self.reset_xa(x, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='actions')
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
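# Hedged usage sketch (added): how the planner above is typically driven. The
# D4RL-style `env` and the pretrained `value_function`/`unet`/`scheduler`
# components are assumptions; kept as comments since they require checkpoints.
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
#   obs, reward, done, info = env.step(action)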
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
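# Usage note (added): pair inputs follow the RoBERTa-style layout built above,
#   <s> A </s></s> B </s>
# Kept as comments because loading the hub checkpoint needs network access:
#   tok = CamembertTokenizerFast.from_pretrained('camembert-base')
#   tok.build_inputs_with_special_tokens([5, 6], [7, 8])
#   # -> [cls_id, 5, 6, sep_id, sep_id, 7, 8, sep_id]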
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if the regexes in qs match a consecutive window of strings in ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + '$') for x in qs))
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __snake_case )),
(("transformer", "wte", "embedding"), P('mp' , __snake_case )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__snake_case , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __snake_case )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__snake_case , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __snake_case )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
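# Added illustrative check (runs only if flax/jax are installed, per the
# imports above); routes a GPT-J-style flattened parameter key through the rules.
if __name__ == "__main__":
    replace = _replacement_rules(_get_partition_rules())
    key = ("transformer", "h", "0", "attn", "attention", "q_proj", "kernel")
    print(replace(key, _unmatched))  # expected: PartitionSpec(None, 'mp')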
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    # build a config from the hub name, instantiate untrained weights, and save both
    # the model and its tokenizer to `save_dir`
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 | 1 |
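# Example invocation via fire (script name and model are illustrative):
#   python save_randomly_initialized_model.py facebook/bart-large-cnn /tmp/rand_bart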
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1])
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('bias') and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path='./ckpt/aeslc/model.ckpt-32000') -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars, desc='converting tf checkpoint to dict'):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == 'large':
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight')
    sd.pop('model.encoder.embed_positions.weight')
    torch.save(sd, Path(save_dir) / 'pytorch_model.bin')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir) | 8 |
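# Example invocation (script name and paths illustrative; save_dir defaults
# to pegasus/<dataset> as handled above):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 save_dirs/aeslc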
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = 'tapas'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function='ratio', cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
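# Added usage sketch (keyword values are illustrative, not the fine-tuned
# checkpoints' settings; kept as comments due to the relative imports above):
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#   config.average_approximation_function  # -> 'ratio'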
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ : Union[str, Any] = get_tests_dir('''fixtures''')
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = mock.Mock()
__A : int = 500
__A : Dict = {}
__A : List[Any] = HTTPError
__A : List[str] = {}
# Download this model to make sure it's in the cache.
__A : int = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : str = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json')
@is_staging_test
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE ( cls):
'''simple docstring'''
__A : Union[str, Any] = TOKEN
HfFolder.save_token(_UpperCAmelCase)
@classmethod
def SCREAMING_SNAKE_CASE ( cls):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor')
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase)
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token)
__A : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase))
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCAmelCase , repo_id='test-feature-extractor' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token)
__A : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase)
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token)
__A : List[str] = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase))
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCAmelCase , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token)
__A : Tuple = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__A : str = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase)
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
__A : Dict = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=_UpperCAmelCase)
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor') | 8 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
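# Hedged usage sketch (added): `cfg` must expose the attributes read in
# Preprocess.__init__ above (INPUT.MIN_SIZE_TEST, MODEL.PIXEL_MEAN, ...).
#   preprocess = Preprocess(cfg)
#   images, sizes, scales_yx = preprocess('demo.jpg', single_image=True)
#   # detector boxes (illustrative) can then be mapped back to raw-image scale:
#   # _scale_box(boxes, scales_yx)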
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure['modeling_data2vec_text'] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure['modeling_data2vec_vision'] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
lowercase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 1 |
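# Expected output (added note): for the sample graph above, the articulation
# points are vertices 2, 3 and 5, so compute_ap(data) prints 2, 3 and 5.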
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ernie'] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group'
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                ' found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
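# Example invocation (script name and paths illustrative):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base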
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
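# Note (added): unlike the RoBERTa-style double-separator pair layout, HerBERT
# pairs are encoded with a single separator between segments,
#   <s> A </s> B </s>
# as implemented in build_inputs_with_special_tokens above.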
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    sample: torch.FloatTensor


class UNetaDModel(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, sample_size=65536, sample_rate=None, in_channels=2, out_channels=2, extra_in_channels=0, time_embedding_type="fourier", flip_sin_to_cos=True, use_timestep_embedding=False, freq_shift=0.0, down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), mid_block_type="UNetMidBlock1D", out_block_type=None, block_out_channels=(32, 32, 64), act_fn=None, norm_num_groups=8, layers_per_block=1, downsample_each_block=False, ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], )
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )
    def forward(self, sample, timestep, return_dict=True):
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        # 1. time embedding
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample)
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    # base case: plain multiplication of two 2x2 matrices
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # Strassen's seven recursive products
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            f'Matrix A: {matrix1}\n'
            f'Matrix B: {matrix2}'
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2)) | 8 |
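# The recursion above bottoms out in default_matrix_multiplication, which is defined
# earlier in this file and not shown in this excerpt. A minimal sketch consistent with
# the (2, 2) base-case call site might look like this (the body here is an assumption):
def default_matrix_multiplication(a: list, b: list) -> list:
    # Schoolbook product of two 2x2 matrices.
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    return [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]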
'''simple docstring'''
def hamming_distance( string_a : str , string_b : str ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
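# A quick usage sketch for hamming_distance above; the expected values follow directly
# from counting mismatched positions.
assert hamming_distance('python', 'pythin') == 1
assert hamming_distance('karolin', 'kathrin') == 3
assert hamming_distance('00000', '11111') == 5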
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : List[Any] = logging.get_logger()
@dataclass
class Tracker :
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook( self , m , inputs , outputs):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m , nn.Conv2d) or isinstance(m , nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__( self , x):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [h.remove() for h in self.handles]
        return self
    @property
    def parametrized( self):
        '''simple docstring'''
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0 , self.traced))
@dataclass
class ModuleTransfer :
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    raise_if_mismatch: bool = True
    def __call__( self , x):
        '''simple docstring'''
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip , src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip , dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                F'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                F' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced , src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(F'Transfered from={src_m} to={dest_m}')
class FakeRegNetVisslWrapper(nn.Module ):
    def __init__( self , model):
        '''simple docstring'''
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(('conv1', model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('block'), F'Unexpected layer name {k}'
            block_index = len(feature_blocks) + 1
            feature_blocks.append((F'res{block_index}', v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)
    def forward( self , x):
        '''simple docstring'''
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap(dict ):
    def convert_name_to_timm( self , x):
        '''simple docstring'''
        x_split = x.split('-')
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
    def __getitem__( self , x):
        '''simple docstring'''
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x , pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict ):
    def __getitem__( self , x):
        '''simple docstring'''
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict : Dict , to_state_dict : Dict , keys : List[Tuple[str, str]] ) -> Dict:
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}' )
    return to_state_dict
def convert_weight_and_push( name : str , from_model_func : Callable[[], nn.Module] , our_model_func : Callable[[], nn.Module] , config : RegNetConfig , save_directory : Path , push_to_hub : bool = True , ) -> Any:
    print(f'Converting {name}...' )
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(x )
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
        to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
        our_model.load_state_dict(to_state_dict )
    our_outputs = our_model(x , output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=True , )
        size = 2_24 if 'seer' not in name else 3_84
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=True , )
        print(f'Pushed {name}' )
def convert_weights_and_push( save_directory : Path , model_name : str = None , push_to_hub : bool = True ) -> Any:
    filename = 'imagenet-1k-id2label.json'
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url : str , model_func : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location='cpu' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
    if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 8 |
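# The Tracker/ModuleTransfer pair above works by registering forward hooks on every
# module and recording only the leaf modules that actually run. A self-contained toy
# version of that tracing idea (an illustration, not the script's API) is:
import torch
import torch.nn as nn

toy_model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
traced = []

def record_leaves(module, inputs, outputs):
    # A module with no submodules reports exactly one entry in modules(): itself.
    if len(list(module.modules())) == 1:
        traced.append(module)

handles = [m.register_forward_hook(record_leaves) for m in toy_model.modules()]
toy_model(torch.randn(1, 3, 32, 32))
[h.remove() for h in handles]
print([type(m).__name__ for m in traced])  # ['Conv2d', 'BatchNorm2d']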
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo : str , pytorch_dump_folder_path : str ) -> None:
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='pytorch_model.bin' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
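# The core of the conversion above is a pure key-renaming pass, which can be checked
# on a toy dict without downloading anything (integers stand in for tensors):
original = {
    'roberta.embeddings.weight': 1,
    'roberta.encoder.layer.0.attention.self.LayerNorm.weight': 2,
    'lm_head.bias': 3,
}
converted = {}
for key, value in original.items():
    if key.startswith('roberta.'):
        key = 'roberta_prelayernorm.' + key[len('roberta.'):]
    if key.endswith('.self.LayerNorm.weight') or key.endswith('.self.LayerNorm.bias'):
        continue  # unused weights are dropped
    converted[key] = value
print(converted)  # {'roberta_prelayernorm.embeddings.weight': 1, 'lm_head.bias': 3}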
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
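# _LazyModule defers the heavy torch/tf/flax imports above until an attribute is first
# touched. This is only a minimal sketch of that pattern (not the real _LazyModule):
import importlib

class LazyAttrProxy:
    def __init__(self, module_name):
        self.module_name = module_name
        self._module = None
    def __getattr__(self, name):
        if self._module is None:
            self._module = importlib.import_module(self.module_name)  # first access pays the import cost
        return getattr(self._module, name)

lazy_json = LazyAttrProxy('json')
print(lazy_json.dumps({'a': 1}))  # the json module is imported only on this line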
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments ):
    sortish_sampler: bool = field(default=False , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    predict_with_generate: bool = field(
        default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `max_length` value of the model configuration.'''
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `num_beams` value of the model configuration.'''
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
        } , )
    def to_dict( self):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig):
                d[k] = v.to_dict()
return d | 8 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
__A : Tuple = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock) as mock_head:
__A : Optional[Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
__A : Optional[int] = GPTaTokenizerFast.from_pretrained('gpt2')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock) as mock_head:
__A : str = GPTaTokenizerFast.from_pretrained('gpt2')
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
__A : Any = tempfile.mktemp()
with open(_UpperCAmelCase , 'wb') as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , _UpperCAmelCase)
__A : Tuple = AlbertTokenizer.from_pretrained(_UpperCAmelCase)
finally:
os.remove(_UpperCAmelCase)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json'):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb') as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , _UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase ):
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
    @classmethod
    def setUpClass( cls):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass( cls):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-tokenizer')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer')
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Dict = os.path.join(_UpperCAmelCase , 'vocab.txt')
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__A : Optional[Any] = BertTokenizer(_UpperCAmelCase)
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token)
__A : Dict = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase , repo_id='test-tokenizer' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token)
__A : Any = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Optional[int] = os.path.join(_UpperCAmelCase , 'vocab.txt')
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__A : List[Any] = BertTokenizer(_UpperCAmelCase)
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token)
__A : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_UpperCAmelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token)
__A : str = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__A : str = os.path.join(_UpperCAmelCase , 'vocab.txt')
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__A : Any = CustomTokenizer(_UpperCAmelCase)
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
__A : List[Any] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=_UpperCAmelCase)
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__A : List[str] = os.path.join(_UpperCAmelCase , 'vocab.txt')
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__A : Tuple = BertTokenizerFast.from_pretrained(_UpperCAmelCase)
bert_tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = CustomTokenizerFast.from_pretrained(_UpperCAmelCase)
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
__A : Tuple = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=_UpperCAmelCase)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast')
__A : Union[str, Any] = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=_UpperCAmelCase , trust_remote_code=_UpperCAmelCase)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
class TrieTest(unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = Trie()
trie.add('Hello 友達')
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
trie.add('Hello')
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS] This is a extra_id_100'])
trie.add('[CLS]')
trie.add('extra_id_1')
trie.add('extra_id_100')
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS]', ' This is a ', 'extra_id_100'])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = Trie()
trie.add('A')
self.assertEqual(trie.split('ABC') , ['A', 'BC'])
self.assertEqual(trie.split('BCA') , ['BC', 'A'])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = Trie()
trie.add('TOKEN]')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = Trie()
trie.add('A')
trie.add('P')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
self.assertEqual(trie.split('ABC') , ['AB', 'C'])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = Trie()
trie.add('ABC')
trie.add('B')
trie.add('CD')
self.assertEqual(trie.split('ABCD') , ['ABC', 'D'])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = Trie()
__A : Optional[int] = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3])
self.assertEqual(_UpperCAmelCase , ['AB', 'C']) | 8 |
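# The Trie exercised by the tests above is a plain nested dict: each added string is a
# path of single characters terminated by an empty-string key. A minimal sketch of the
# insertion step (an assumption, not the transformers implementation):
def trie_add(data: dict, word: str) -> None:
    node = data
    for char in word:
        node = node.setdefault(char, {})
    node[''] = 1

data: dict = {}
trie_add(data, 'Hello 友達')
trie_add(data, 'Hello')
print(data['H']['e']['l']['l']['o'][''])  # 1 -- 'Hello' also terminates here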
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig ):
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP | 8 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number( number : int ) -> int:
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
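# Worked check of the dynamic programme above: each answers[i] is one square plus the
# best answer for the remainder, so e.g. 12 = 4 + 4 + 4 needs three squares.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(25) == 1  # 25 = 5 ** 2
assert minimum_squares_to_represent_a_number(0) == 1   # convention used by this function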
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE (a__ ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = Rectangle(height=0.5 , width=0.5)
__A : Tuple = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
__A : Dict = Rectangle(height=0.25 , width=0.25)
__A : str = [mem.copy() for i in range(6)]
__A : Union[str, Any] = [mem.copy() for i in range(6)]
__A : int = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase , buff=0)
__A : Dict = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase , buff=0)
__A : List[str] = VGroup(_UpperCAmelCase , _UpperCAmelCase).arrange(_UpperCAmelCase , buff=0)
__A : Any = Text('CPU' , font_size=24)
__A : Any = Group(_UpperCAmelCase , _UpperCAmelCase).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase)
cpu.move_to([-2.5, -0.5, 0])
self.add(_UpperCAmelCase)
__A : Any = [mem.copy() for i in range(4)]
__A : Optional[Any] = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase , buff=0)
__A : Any = Text('GPU' , font_size=24)
__A : int = Group(_UpperCAmelCase , _UpperCAmelCase).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase)
gpu.move_to([-1, -1, 0])
self.add(_UpperCAmelCase)
__A : Optional[Any] = [mem.copy() for i in range(6)]
__A : List[Any] = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase , buff=0)
__A : Dict = Text('Model' , font_size=24)
__A : int = Group(_UpperCAmelCase , _UpperCAmelCase).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase)
model.move_to([3, -1.0, 0])
self.add(_UpperCAmelCase)
__A : int = []
__A : Union[str, Any] = []
for i, rect in enumerate(_UpperCAmelCase):
__A : List[str] = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8)
target.move_to(_UpperCAmelCase)
model_arr.append(_UpperCAmelCase)
__A : Dict = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(_UpperCAmelCase , opacity=0.8)
cpu_target.move_to(cpu_left_col_base[i])
model_cpu_arr.append(_UpperCAmelCase)
self.add(*_UpperCAmelCase , *_UpperCAmelCase)
__A : str = [meta_mem.copy() for i in range(6)]
__A : Optional[Any] = [meta_mem.copy() for i in range(6)]
__A : Optional[int] = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase , buff=0)
__A : Any = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase , buff=0)
__A : List[Any] = VGroup(_UpperCAmelCase , _UpperCAmelCase).arrange(_UpperCAmelCase , buff=0)
__A : List[Any] = Text('Disk' , font_size=24)
__A : int = Group(_UpperCAmelCase , _UpperCAmelCase).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase)
disk.move_to([-4, -1.25, 0])
self.add(_UpperCAmelCase , _UpperCAmelCase)
__A : List[Any] = Square(side_length=2.2)
key.move_to([-5, 2, 0])
__A : Dict = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left())
self.add(_UpperCAmelCase)
__A : Union[str, Any] = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(_UpperCAmelCase))
__A : Dict = Square(0.3)
input.set_fill(_UpperCAmelCase , opacity=1.0)
input.set_stroke(width=0.0)
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5)
self.play(Write(_UpperCAmelCase))
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02)
self.play(MoveToTarget(_UpperCAmelCase))
self.play(FadeOut(_UpperCAmelCase))
__A : Dict = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5)
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2)
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0])
__A : List[str] = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(_UpperCAmelCase , run_time=3))
__A : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase) , )
self.play(MoveToTarget(model_cpu_arr[0]))
__A : List[Any] = a.copy()
for i in range(6):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2)
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02)
__A : List[Any] = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5) , MoveToTarget(_UpperCAmelCase , run_time=0.5) , FadeIn(_UpperCAmelCase , run_time=0.5) , lag_ratio=0.2)
self.play(_UpperCAmelCase)
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i])
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
if i >= 1:
__A : Union[str, Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i]) , MoveToTarget(model_cpu_arr[i + 1]) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2)
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase) , )
self.play(MoveToTarget(model_cpu_arr[i]))
__A : str = a_c
__A : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5)
self.play(
FadeOut(_UpperCAmelCase) , FadeOut(_UpperCAmelCase , run_time=0.5) , )
__A : Optional[Any] = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24)
step_a.move_to([2, 2, 0])
self.play(Write(_UpperCAmelCase , run_time=3) , MoveToTarget(_UpperCAmelCase))
self.wait() | 8 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack( value : list[int] , weight : list[int] , capacity : int ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
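# Usage sketch for fractional_knapsack above: items are taken greedily by value/weight
# ratio, and only the last item taken may be fractional.
value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, 50)
print(max_value)  # 240.0 -> items 0 and 1 in full, then 2/3 of item 2
print(fractions)  # [1, 1, 0.6666666666666666]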
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase :
    name: str
    pip_package: str = None
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        raise NotImplementedError
    def run( self , trainer , n_trials , direction , **kwargs):
        '''simple docstring'''
        raise NotImplementedError
    def default_hp_space( self , trial):
        '''simple docstring'''
        raise NotImplementedError
    def ensure_available( self):
        '''simple docstring'''
        if not self.is_available():
            raise RuntimeError(
                F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.')
    @classmethod
    def pip_install( cls):
        '''simple docstring'''
        return F'`pip install {cls.pip_package or cls.name}`'
class OptunaBackend(HyperParamSearchBackendBase ):
    name = '''optuna'''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_optuna_available()
    def run( self , trainer , n_trials , direction , **kwargs):
        '''simple docstring'''
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs)
    def default_hp_space( self , trial):
        '''simple docstring'''
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase ):
    name = '''ray'''
    pip_package = '''\'ray[tune]\''''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_ray_available()
    def run( self , trainer , n_trials , direction , **kwargs):
        '''simple docstring'''
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs)
    def default_hp_space( self , trial):
        '''simple docstring'''
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase ):
    name = '''sigopt'''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_sigopt_available()
    def run( self , trainer , n_trials , direction , **kwargs):
        '''simple docstring'''
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs)
    def default_hp_space( self , trial):
        '''simple docstring'''
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase ):
    name = '''wandb'''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_wandb_available()
    def run( self , trainer , n_trials , direction , **kwargs):
        '''simple docstring'''
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs)
    def default_hp_space( self , trial):
        '''simple docstring'''
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f'{len(available_backends )} hyperparameter search backends available. Using {name} as the default.' )
        return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
f' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) ) | 8 |
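# Usage sketch for the registry above, assuming at least one backend (e.g. optuna) is
# installed in the environment:
name = default_hp_search_backend()                       # e.g. 'optuna'
backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]
print(backend.name, backend.is_available())              # e.g. optuna True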
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree :
    def __init__( self , size):
        '''simple docstring'''
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size)]
        self.flag = [0 for i in range(0 , 4 * size)] # flag for lazy update
    def left( self , idx):
        '''simple docstring'''
        return idx * 2
    def right( self , idx):
        '''simple docstring'''
        return idx * 2 + 1
    def build( self , idx , left_element , right_element , a):
        '''simple docstring'''
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx) , left_element , mid , a)
            self.build(self.right(idx) , mid + 1 , right_element , a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)] , self.segment_tree[self.right(idx)])
    def update( self , idx , left_element , right_element , a , b , val):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx) , left_element , mid , a , b , val)
        self.update(self.right(idx) , mid + 1 , right_element , a , b , val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)] , self.segment_tree[self.right(idx)])
        return True
    def query( self , idx , left_element , right_element , a , b):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx) , left_element , mid , a , b)
        q2 = self.query(self.right(idx) , mid + 1 , right_element , a , b)
        return max(q1 , q2)
    def __str__( self):
        '''simple docstring'''
        return str([self.query(1 , 1 , self.size , i , i) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt) | 8 | 1 |
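# A second, smaller run showing why the lazy arrays matter: a range assignment only
# marks the covering nodes and defers the push-down until a later query touches them.
segt2 = SegmentTree(8)
segt2.build(1, 1, 8, [1, 2, 3, 4, 5, 6, 7, 8])
segt2.update(1, 1, 8, 2, 6, 0)     # lazily assign 0 over positions 2..6
print(segt2.query(1, 1, 8, 1, 8))  # 8 -- only positions 1, 7 and 8 keep their values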
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception ):
    pass
class Node :
    def __init__( self , data):
        '''simple docstring'''
        self.data = data
        self.next_node: Node | None = None
    def __iter__( self):
        '''simple docstring'''
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node
    @property
    def has_loop( self):
        '''simple docstring'''
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False
    root_node = Node(1)
print(root_node.has_loop) # False | 8 |
'''simple docstring'''
def sum_of_series( first_term : int , common_diff : int , num_of_terms : int ) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
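# Sanity check against the closed form S_n = n / 2 * (2 * a_1 + (n - 1) * d):
# with a_1 = 1, d = 1, n = 10 this gives 10 / 2 * (2 + 9) = 55, i.e. 1 + 2 + ... + 10.
assert sum_of_series(1, 1, 10) == 55.0
assert sum_of_series(5, 0, 4) == 20.0  # constant series: 5 + 5 + 5 + 5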
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs_as_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_as_list)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, 'use_cache'):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size])
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, 'key_length', decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length])

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict['output_attentions']
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) | 8 | 1 |
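# Hedged note on the head-count checks above: the ConvBERT tester uses
# num_attention_heads = 4 with head_ratio = 2, so the grouped/conv attention
# maps report num_attention_heads / 2 = 2 heads, e.g. an attention tensor of
# shape (batch, 2, seq_len, seq_len) rather than (batch, 4, seq_len, seq_len).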
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = 'xlm'
    attribute_map = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type='first', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs['n_words']
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]) | 8 |
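# Hedged sketch of what the dynamic axes above mean at ONNX export time: for the
# default (non multiple-choice) task, `input_ids` of shape (batch, sequence) is
# exported with both axes symbolic, so a single exported graph serves any batch
# size and sequence length, e.g. (1, 128) and (8, 512) alike.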
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace('_', '')

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
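# A quick hedged example of the ordering sort_objects produces (the names are
# hypothetical, not from any real init): constants first, then classes, then
# functions, each group sorted case-insensitively with underscores ignored:
#     sort_objects(['zeta_fn', 'Alpha', 'CONST_B', 'beta'])
#     # -> ['CONST_B', 'Alpha', 'beta', 'zeta_fn']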
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, 'r') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height/width the shortest-edge resize is expected to produce."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
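    # Worked example of the shortest-edge rule above (assuming the default
    # shortest_edge of 18): a 30x40 (w x h) image has w < h, so
    # expected_width = 18 and expected_height = int(18 * 40 / 30) = 24.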
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : Dict = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A ,__A : str = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
__A : List[Any] = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
__A ,__A : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Any = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
__A ,__A : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
__A : Dict = self.image_processing_class(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_rescale=_UpperCAmelCase)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Any = image_processing_a.pad(_UpperCAmelCase , return_tensors='pt')
__A : Optional[int] = image_processing_a(_UpperCAmelCase , return_tensors='pt')
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
__A : int = json.loads(f.read())
__A : Any = {'image_id': 3_9769, 'annotations': target}
# encode them
__A : int = YolosImageProcessor.from_pretrained('hustvl/yolos-small')
__A : str = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='pt')
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase)
__A : List[str] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4))
# verify area
__A : Union[str, Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase))
# verify boxes
__A : Tuple = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase)
__A : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3))
# verify image_id
__A : Union[str, Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase))
# verify is_crowd
__A : List[str] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase))
# verify class_labels
__A : int = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase))
# verify orig_size
__A : Union[str, Any] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase))
# verify size
__A : List[str] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase))
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
__A : Any = json.loads(f.read())
__A : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__A : Dict = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
__A : List[Any] = YolosImageProcessor(format='coco_panoptic')
__A : Optional[int] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='pt')
# verify pixel values
__A : str = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase)
__A : int = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4))
# verify area
__A : Union[str, Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase))
# verify boxes
__A : Optional[Any] = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase)
__A : Union[str, Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3))
# verify image_id
__A : Union[str, Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase))
# verify is_crowd
__A : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase))
# verify class_labels
__A : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase))
# verify masks
__A : Tuple = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCAmelCase)
# verify orig_size
__A : str = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase))
# verify size
__A : int = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase)) | 8 |
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
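# Hedged examples of the divisor-sum check above: 6 (1 + 2 + 3), 28 and 496 are
# perfect, while e.g. perfect(12) is False because 1 + 2 + 3 + 4 + 6 = 16 != 12.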
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase__ : Dict = {
'''camembert-base''': 5_12,
}
lowercase__ : str = '''▁'''
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
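    # Hedged illustration of the layout built above, following the
    # RoBERTa/CamemBERT convention: a single sequence becomes `<s> A </s>`, while
    # a pair becomes `<s> A </s></s> B </s>` (note the doubled separator).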
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,) | 8 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
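# Hedged example of the renaming above (the key is hypothetical): a ParlAI key
# like 'encoder.layers.0.attention.q_lin.weight' becomes
# 'encoder.layers.0.self_attn.q_proj.weight' after the PATTERNS substitutions
# and the encoder-specific '.attn' -> '.self_attn' rewrite.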
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 1 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
# to overwrite at feature extractactor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, 'feature_size'))
        self.assertTrue(hasattr(feat_extract, 'sampling_rate'))
        self.assertTrue(hasattr(feat_extract, 'padding_value'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.feat_extract_tester.prepare_inputs_for_common()
__A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : Optional[Any] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(_UpperCAmelCase) == len(_UpperCAmelCase) for x, y in zip(_UpperCAmelCase , processed_features[input_name])))
__A : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase)
__A : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='np')
__A : Dict = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__A : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase)
__A : Tuple = self.feature_extraction_class(**self.feat_extract_dict)
__A : Dict = feat_extract.model_input_names[0]
__A : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
__A : Tuple = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__A : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase)
__A : str = self.feature_extraction_class(**self.feat_extract_dict)
__A : Dict = feat_extract.model_input_names[0]
__A : int = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
__A : Tuple = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__A : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=False):
'''simple docstring'''
def _inputs_have_equal_length(_UpperCAmelCase):
__A : str = len(input[0])
for input_slice in input[1:]:
if len(_UpperCAmelCase) != length:
return False
return True
def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase):
if len(_UpperCAmelCase) != len(_UpperCAmelCase):
return False
for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase):
if not np.allclose(np.asarray(_UpperCAmelCase) , np.asarray(_UpperCAmelCase) , atol=1e-3):
return False
return True
__A : str = self.feature_extraction_class(**self.feat_extract_dict)
__A : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase)
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : Tuple = BatchFeature({input_name: speech_inputs})
__A : str = self.feat_extract_tester.seq_length_diff
__A : Tuple = self.feat_extract_tester.max_seq_length + pad_diff
__A : Union[str, Any] = self.feat_extract_tester.min_seq_length
__A : Union[str, Any] = self.feat_extract_tester.batch_size
__A : Any = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__A : str = feat_extract.pad(_UpperCAmelCase , padding=_UpperCAmelCase)
__A : Optional[Any] = input_a[input_name]
__A : List[Any] = feat_extract.pad(_UpperCAmelCase , padding='longest')
__A : Optional[int] = input_a[input_name]
__A : Optional[Any] = feat_extract.pad(_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
__A : List[str] = input_a[input_name]
__A : Tuple = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np')
__A : List[str] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , padding='max_length')[input_name]
__A : Any = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='np')
__A : str = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
__A : Dict = feat_extract.pad(_UpperCAmelCase , pad_to_multiple_of=10)
__A : Optional[int] = input_a[input_name]
__A : Any = feat_extract.pad(_UpperCAmelCase , padding='longest' , pad_to_multiple_of=10)
__A : Optional[Any] = input_a[input_name]
__A : List[str] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase)
__A : Optional[int] = input_a[input_name]
__A : Optional[Any] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase , return_tensors='np' , )
__A : str = input_a[input_name]
self.assertTrue(all(len(_UpperCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_UpperCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
__A : int = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3)
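    # Hedged arithmetic behind the pad_to_multiple_of=10 assertions above: a
    # longest input of length 37 pads up to ((37 // 10) + 1) * 10 = 40, while a
    # length already divisible by 10 (say 40) is left at 40.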
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=False):
'''simple docstring'''
def _inputs_have_equal_length(_UpperCAmelCase):
__A : Optional[Any] = len(input[0])
for input_slice in input[1:]:
if len(_UpperCAmelCase) != length:
return False
return True
def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase):
if len(_UpperCAmelCase) != len(_UpperCAmelCase):
return False
for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase):
if not np.allclose(np.asarray(_UpperCAmelCase) , np.asarray(_UpperCAmelCase) , atol=1e-3):
return False
return True
__A : str = self.feature_extraction_class(**self.feat_extract_dict)
__A : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase)
__A : Optional[int] = feat_extract.model_input_names[0]
__A : Union[str, Any] = BatchFeature({input_name: speech_inputs})
# truncate to smallest
__A : Tuple = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=_UpperCAmelCase)
__A : Tuple = input_a[input_name]
__A : int = feat_extract.pad(_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
__A : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
# truncate to smallest with np
__A : Tuple = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=_UpperCAmelCase , )
__A : List[Any] = input_a[input_name]
__A : Tuple = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
__A : Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
# truncate to middle
__A : str = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=_UpperCAmelCase , return_tensors='np' , )
__A : List[str] = input_a[input_name]
__A : Optional[Any] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=_UpperCAmelCase)
__A : Any = input_a[input_name]
__A : Any = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
__A : Tuple = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , truncation=_UpperCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , padding='longest' , truncation=_UpperCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , padding='longest' , truncation=_UpperCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , padding='max_length' , truncation=_UpperCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__A : int = 12
__A : Optional[Any] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , )
__A : str = input_a[input_name]
__A : Union[str, Any] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=_UpperCAmelCase , )
__A : Optional[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__A : List[str] = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
__A : Optional[Any] = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._check_padding(numpify=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._check_padding(numpify=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._check_truncation(numpify=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._check_truncation(numpify=_UpperCAmelCase)
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__A : int = self.feat_extract_tester.prepare_inputs_for_common()
__A : Union[str, Any] = feat_extract.model_input_names[0]
__A : str = BatchFeature({input_name: speech_inputs})
__A : int = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np')[input_name]
__A : Optional[int] = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
@require_tf
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
__A : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : Union[str, Any] = BatchFeature({input_name: speech_inputs})
__A : Optional[Any] = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np')[input_name]
__A : str = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.feat_extract_dict
__A : List[str] = True
__A : List[Any] = self.feature_extraction_class(**_UpperCAmelCase)
__A : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__A : Optional[Any] = [len(_UpperCAmelCase) for x in speech_inputs]
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : Optional[Any] = BatchFeature({input_name: speech_inputs})
__A : List[str] = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , _UpperCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.feat_extract_dict
__A : Optional[int] = True
__A : List[str] = self.feature_extraction_class(**_UpperCAmelCase)
__A : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
__A : str = [len(_UpperCAmelCase) for x in speech_inputs]
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : List[Any] = BatchFeature({input_name: speech_inputs})
__A : Dict = min(_UpperCAmelCase)
__A : List[str] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , _UpperCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs]) | 8 |
"""Fast polynomial multiplication using the fast Fourier transform (FFT)."""
import mpmath  # for roots of unity
import numpy as np


class SCREAMING_SNAKE_CASE:
    def __init__(self, poly_a=None, poly_b=None):
        """Multiply poly_a by poly_b (coefficient lists, lowest degree first)."""
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove trailing zero coefficients (keep at least one term)
        while len(self.polyA) > 1 and self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while len(self.polyB) > 1 and self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root of unity used for the Fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Iterative DFT of polynomial 'A' or 'B'."""
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs, then apply the inverse DFT."""
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while len(inverce_c) > 1 and inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable form of A, B and their product."""
        a = 'A = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product))
        return f'{a}\n{b}\n{c}'
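
# Illustrative check (a sketch, not part of the original file):
# (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3, so
#
#     print(SCREAMING_SNAKE_CASE(poly_a=[1, 2, 3], poly_b=[4, 5]))
#
# should report A*B with coefficients 4, 13, 22 and 15 (zero imaginary parts).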
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: relu(x) = max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5] | 8 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
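        # e.g. with the defaults above (image_size=[30, 30], patch_size=2,
        # num_detection_tokens=10): (30 // 2) * (30 // 2) = 225 patches,
        # so expected_seq_len = 225 + 1 + 10 = 236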
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 | 1 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_00

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=1_00)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products of primes over all prime partitions of
    `number_to_partition`; e.g. 5 = 5 and 5 = 2 + 3, so partition(5) == {5, 6}
    and len(partition(5)) counts the two distinct prime partitions of 5."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 50_00) -> int | None:
    """Return the first value writable as a sum of primes in more than
    `number_unique_partitions` ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 5_12,
}

SPIECE_UNDERLINE = '▁'


class SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: "<s> X </s>"; pair of sequences: "<s> A </s></s> B </s>"
        # (CamemBERT, like RoBERTa, uses a double separator between segments).
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # CamemBERT does not use token type ids, so a list of zeros is returned.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,) | 8 | 1 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place with odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list) | 8 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
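# In brief (an illustrative note, not part of the original script):
# `find_executable_batch_size` re-runs the decorated function with a halved
# batch size each time it raises an out-of-memory error, e.g.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def inner_training_loop(batch_size):
#         ...  # CUDA OOM here -> automatically retried with 64, then 32, ...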
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=1_00,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 8 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model with the architecture of `config_name` to `save_dir`."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
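
# Illustrative CLI usage via python-fire (the script and model names here are
# only examples, not taken from the original file):
#
#     python save_randomly_initialized_model.py t5-small /tmp/rand-t5-small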
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 | 1 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy()) | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE(PretrainedConfig):
    model_type = 'tapas'

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function='ratio',
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()} | 8 | 1 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits
    when `digit_amount` > 0 (e.g. decimal_isolate(1.53, 0) -> 0.53)."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3)) | 8 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class SCREAMING_SNAKE_CASE :
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value)
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h) | 8 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
pass
def _lowerCAmelCase ( __snake_case : Tuple ) -> Union[str, Any]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowercase__ : List[Any] = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = pipeline(
'document-question-answering' , model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Optional[int] = INVOICE_URL
__A : Any = list(zip(*apply_tesseract(load_image(_UpperCAmelCase) , _UpperCAmelCase , '')))
__A : Tuple = 'What is the placebo?'
__A : List[Any] = [
{
'image': load_image(_UpperCAmelCase),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = dqa_pipeline(_UpperCAmelCase , top_k=2)
self.assertEqual(
_UpperCAmelCase , [
[
{'score': ANY(_UpperCAmelCase), 'answer': ANY(_UpperCAmelCase), 'start': ANY(_UpperCAmelCase), 'end': ANY(_UpperCAmelCase)},
{'score': ANY(_UpperCAmelCase), 'answer': ANY(_UpperCAmelCase), 'start': ANY(_UpperCAmelCase), 'end': ANY(_UpperCAmelCase)},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2')
__A : Any = INVOICE_URL
__A : List[str] = 'How many cats are there?'
__A : Union[str, Any] = [
{'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
__A : Optional[Any] = dqa_pipeline(image=_UpperCAmelCase , question=_UpperCAmelCase , top_k=2)
self.assertEqual(nested_simplify(_UpperCAmelCase , decimals=4) , _UpperCAmelCase)
__A : Union[str, Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(nested_simplify(_UpperCAmelCase , decimals=4) , _UpperCAmelCase)
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__A : List[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png'
__A : List[str] = dqa_pipeline(image=_UpperCAmelCase , question=_UpperCAmelCase , top_k=2)
self.assertEqual(_UpperCAmelCase , [])
# We can optionnally pass directly the words and bounding boxes
__A : Tuple = './tests/fixtures/tests_samples/COCO/000000039769.png'
__A : str = []
__A : str = []
__A : Any = dqa_pipeline(image=_UpperCAmelCase , question=_UpperCAmelCase , words=_UpperCAmelCase , boxes=_UpperCAmelCase , top_k=2)
self.assertEqual(_UpperCAmelCase , [])
@slow
@require_torch
@require_detectrona
@require_pytesseract
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
__A : Optional[Any] = INVOICE_URL
__A : int = 'What is the invoice number?'
__A : Tuple = dqa_pipeline(image=_UpperCAmelCase , question=_UpperCAmelCase , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__A : Union[str, Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__A : Optional[Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
__A : Optional[int] = INVOICE_URL
__A : List[str] = 'What is the invoice number?'
__A : List[str] = dqa_pipeline(image=_UpperCAmelCase , question=_UpperCAmelCase , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__A : Tuple = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__A : List[str] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_UpperCAmelCase)
__A : List[Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_UpperCAmelCase , revision='3dc6de3' , )
__A : Tuple = INVOICE_URL
__A : List[Any] = 'What is the invoice number?'
__A : Dict = dqa_pipeline(image=_UpperCAmelCase , question=_UpperCAmelCase , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
__A : List[Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
__A : Optional[int] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
__A : Tuple = list(zip(*apply_tesseract(load_image(_UpperCAmelCase) , _UpperCAmelCase , '')))
# This model should also work if `image` is set to None
__A : Union[str, Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_UpperCAmelCase)
__A : Dict = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_UpperCAmelCase , revision='3dc6de3' , max_seq_len=50 , )
__A : str = INVOICE_URL
__A : List[Any] = 'What is the invoice number?'
__A : Tuple = dqa_pipeline(image=_UpperCAmelCase , question=_UpperCAmelCase , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__A : Any = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
__A : Tuple = list(zip(*apply_tesseract(load_image(_UpperCAmelCase) , _UpperCAmelCase , '')))
# This model should also work if `image` is set to None
__A : Any = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa') , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
__A : int = INVOICE_URL
__A : Union[str, Any] = 'What is the invoice number?'
__A : List[Any] = dqa_pipeline(image=_UpperCAmelCase , question=_UpperCAmelCase , top_k=2)
self.assertEqual(nested_simplify(_UpperCAmelCase , decimals=4) , [{'answer': 'us-001'}])
@require_tf
@unittest.skip('Document question answering not implemented in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass | 8 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 1 |
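# Added usage sketch for compute_ap above: in the path graph 0-1-2 the middle
# vertex is the only articulation point, so this prints 1. (For the sample
# graph above, the call prints 2, 3 and 5.)
path_graph = {0: [1], 1: [0, 2], 2: [1]}
compute_ap(path_graph)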
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase)
@torch.no_grad()
def __call__( self , _UpperCAmelCase = 1 , _UpperCAmelCase = None , _UpperCAmelCase = 50 , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_UpperCAmelCase , )
__A : Union[str, Any] = image.to(self.device)
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
__A : Union[str, Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__A : List[str] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase).prev_sample
__A : Tuple = (image / 2 + 0.5).clamp(0 , 1)
__A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__A : Dict = self.numpy_to_pil(_UpperCAmelCase)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=_UpperCAmelCase), "This is a local test" | 8 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase__ : Dict = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]:
for attribute in key.split('.' ):
__A : int = getattr(__snake_case , __snake_case )
if weight_type is not None:
__A : Optional[int] = getattr(__snake_case , __snake_case ).shape
else:
__A : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
__A : Tuple = value
elif weight_type == "weight_g":
__A : Union[str, Any] = value
elif weight_type == "weight_v":
__A : Optional[Any] = value
elif weight_type == "bias":
__A : Optional[int] = value
else:
__A : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]:
__A : Optional[Any] = []
__A : Any = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__A : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : int = True
if "*" in mapped_key:
__A : Any = name.split(__snake_case )[0].split('.' )[-2]
__A : List[Any] = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Union[str, Any] = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__A : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : Tuple = 'weight'
else:
__A : Dict = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
__A : int = full_name.split('conv_layers.' )[-1]
__A : List[str] = name.split('.' )
__A : Optional[int] = int(items[0] )
__A : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__A : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__A : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
)
__A : Dict = value
logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
__A : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any:
# load the pre-trained checkpoints
__A : List[str] = torch.load(__snake_case )
__A : Dict = WavLMConfigOrig(checkpoint['cfg'] )
__A : Optional[int] = WavLMOrig(__snake_case )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__A : List[Any] = WavLMConfig.from_pretrained(__snake_case )
else:
__A : Dict = WavLMConfig()
__A : Optional[Any] = WavLMModel(__snake_case )
recursively_load_weights(__snake_case , __snake_case )
hf_wavlm.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings that differ in at most one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly merge minterm pairs; terms that never merge are prime implicants."""
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == '$':
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        minterm = int(minterm)  # minterms may arrive as floats from `main`
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart, then greedily cover the rest."""
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one row marks that row as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover: repeatedly take the row covering the most remaining columns.
    while True:
        max_n = 0
        rem = -1
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 8 |
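# Added worked example for the Quine-McCluskey helpers above: minterms
# {0, 1, 2, 5} over three variables reduce to the prime implicants 00_, 0_0
# and _01. `check` returns them in set order, so compare as a set.
pis = check(decimal_to_binary(3, [0, 1, 2, 5]))
assert set(pis) == {'00_', '0_0', '_01'}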
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE (a__ , a__ ):
@register_to_config
def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__A : Dict = sample_size
# time
if time_embedding_type == "fourier":
__A : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase)
__A : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__A : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase)
__A : List[str] = block_out_channels[0]
if use_timestep_embedding:
__A : Optional[Any] = block_out_channels[0] * 4
__A : Optional[int] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
__A : Dict = nn.ModuleList([])
__A : Dict = None
__A : Tuple = nn.ModuleList([])
__A : Tuple = None
# down
__A : Any = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase):
__A : Tuple = output_channel
__A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__A : List[str] = i == len(_UpperCAmelCase) - 1
__A : int = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase)
# mid
__A : str = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
__A : Optional[int] = list(reversed(_UpperCAmelCase))
__A : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
__A : str = out_channels
else:
__A : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase):
__A : Optional[Any] = output_channel
__A : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels
)
__A : Dict = i == len(_UpperCAmelCase) - 1
__A : str = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase)
__A : Optional[int] = output_channel
# out
__A : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
__A : Optional[Any] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
'''simple docstring'''
__A : Any = timestep
if not torch.is_tensor(_UpperCAmelCase):
__A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0:
__A : Any = timesteps[None].to(sample.device)
__A : List[Any] = self.time_proj(_UpperCAmelCase)
if self.config.use_timestep_embedding:
__A : Dict = self.time_mlp(_UpperCAmelCase)
else:
__A : Dict = timestep_embed[..., None]
__A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
__A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
__A : int = ()
for downsample_block in self.down_blocks:
__A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
__A : Any = down_block_res_samples[-1:]
__A : Optional[int] = down_block_res_samples[:-1]
__A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase)
# 5. post-process
if self.out_block:
__A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_UpperCAmelCase) | 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : int = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowercase__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 8 |
'''simple docstring'''
def hamming_distance(string_a: str, string_b: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
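# Added usage sketch for hamming_distance above; 'karolin'/'kathrin' is the
# classic textbook pair.
assert hamming_distance('karolin', 'kathrin') == 3
assert hamming_distance('0000', '1111') == 4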
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : Optional[Any] = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowercase__ : int = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowercase__ : Tuple = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowercase__ : Any = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
lowercase__ : int = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
lowercase__ : Tuple = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
lowercase__ : Union[str, Any] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowercase__ : Any = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowercase__ : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = DPRQuestionEncoderTokenizer
lowercase__ : Union[str, Any] = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowercase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowercase__ : Tuple = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(a__ )
class SCREAMING_SNAKE_CASE :
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
elif titles is None or texts is None:
__A : List[str] = titles if texts is None else texts
return super().__call__(
_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
__A : Optional[int] = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase) else [titles]
__A : Optional[int] = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase) else [texts]
__A : Union[str, Any] = len(_UpperCAmelCase)
__A : Any = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase) else [questions] * n_passages
assert len(_UpperCAmelCase) == len(
_UpperCAmelCase), F'There should be as many titles as texts, but got {len(_UpperCAmelCase)} titles and {len(_UpperCAmelCase)} texts.'
__A : Union[str, Any] = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase)['input_ids']
__A : Optional[int] = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase)['input_ids']
__A : List[str] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase)
]
}
if return_attention_mask is not False:
__A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
__A : List[Any] = attention_mask
return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 16 , _UpperCAmelCase = 64 , _UpperCAmelCase = 4 , ):
'''simple docstring'''
__A : int = reader_input['input_ids']
__A ,__A ,__A : Optional[Any] = reader_output[:3]
__A : str = len(_UpperCAmelCase)
__A : Any = sorted(range(_UpperCAmelCase) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__)
__A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__A : Union[str, Any] = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
__A : Tuple = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__A : int = sequence_ids.index(self.pad_token_id)
else:
__A : Optional[int] = len(_UpperCAmelCase)
__A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(_UpperCAmelCase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
'''simple docstring'''
__A : List[str] = []
for start_index, start_score in enumerate(_UpperCAmelCase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
__A : Any = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: x[1] , reverse=_UpperCAmelCase)
__A : List[str] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
__A : int = end_index - start_index + 1
assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(_UpperCAmelCase) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class SCREAMING_SNAKE_CASE (a__ , a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = DPRReaderTokenizer | 8 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> Union[str, Any]:
__A : int = RobertaPreLayerNormConfig.from_pretrained(
__snake_case , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
__A : Tuple = torch.load(hf_hub_download(repo_id=__snake_case , filename='pytorch_model.bin' ) )
__A : str = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
__A : Dict = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
__A : str = tensor_value
__A : Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__snake_case , config=__snake_case , state_dict=__snake_case )
model.save_pretrained(__snake_case )
# convert tokenizer
__A : List[Any] = AutoTokenizer.from_pretrained(__snake_case )
tokenizer.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    """Return the largest product a*b*c for a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
d = super().to_dict()
for k, v in d.items():
    if isinstance(v, GenerationConfig):
        d[k] = v.to_dict()
return d | 8 | 1 |
'''simple docstring'''
class DisjointSet:
    """Union-find over sets with known sizes, tracking the largest merged set."""

    def __init__(self, set_counts):
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Merge the sets containing `src` and `dst`; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the root of `disj_set`, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set] | 8 |
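# Added usage sketch for DisjointSet above (toy data): three singleton sets of
# sizes 1, 2 and 3; merging the first two yields a set of size 3.
ds = DisjointSet([1, 2, 3])
ds.merge(0, 1)
assert ds.get_parent(0) == ds.get_parent(1)
assert ds.max_set == 3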
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''lxmert'''
lowerCAmelCase = {}
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = vocab_size
__A : int = hidden_size
__A : str = num_attention_heads
__A : Tuple = hidden_act
__A : int = intermediate_size
__A : str = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Optional[Any] = max_position_embeddings
__A : Tuple = type_vocab_size
__A : Optional[int] = initializer_range
__A : Any = layer_norm_eps
__A : Optional[Any] = num_qa_labels
__A : Optional[int] = num_object_labels
__A : Any = num_attr_labels
__A : Union[str, Any] = l_layers
__A : Optional[int] = x_layers
__A : List[Any] = r_layers
__A : Tuple = visual_feat_dim
__A : Tuple = visual_pos_dim
__A : Optional[int] = visual_loss_normalizer
__A : int = task_matched
__A : List[Any] = task_mask_lm
__A : Optional[Any] = task_obj_predict
__A : str = task_qa
__A : List[Any] = visual_obj_loss
__A : Optional[Any] = visual_attr_loss
__A : Union[str, Any] = visual_feat_loss
__A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**_UpperCAmelCase) | 8 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''table-transformer'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=3 , _UpperCAmelCase=100 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase="resnet50" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , **_UpperCAmelCase , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__A : int = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Any = backbone_config.get('model_type')
__A : int = CONFIG_MAPPING[backbone_model_type]
__A : str = config_class.from_dict(_UpperCAmelCase)
# set timm attributes to None
__A ,__A ,__A : Optional[Any] = None, None, None
__A : str = use_timm_backbone
__A : Optional[Any] = backbone_config
__A : Dict = num_channels
__A : Optional[int] = num_queries
__A : Optional[int] = d_model
__A : Tuple = encoder_ffn_dim
__A : str = encoder_layers
__A : Optional[int] = encoder_attention_heads
__A : Dict = decoder_ffn_dim
__A : Tuple = decoder_layers
__A : Union[str, Any] = decoder_attention_heads
__A : Tuple = dropout
__A : Optional[int] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Optional[int] = activation_function
__A : Optional[Any] = init_std
__A : Tuple = init_xavier_std
__A : Tuple = encoder_layerdrop
__A : List[str] = decoder_layerdrop
__A : Optional[Any] = encoder_layers
__A : Optional[int] = auxiliary_loss
__A : Any = position_embedding_type
__A : Optional[int] = backbone
__A : Any = use_pretrained_backbone
__A : Tuple = dilation
# Hungarian matcher
__A : int = class_cost
__A : List[Any] = bbox_cost
__A : Any = giou_cost
# Loss coefficients
__A : Optional[int] = mask_loss_coefficient
__A : Dict = dice_loss_coefficient
__A : Dict = bbox_loss_coefficient
__A : int = giou_loss_coefficient
__A : int = eos_coefficient
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 12 | 8 |
'''simple docstring'''
import math
import sys
def perfect_squares(number: int) -> int:
    """Return the least number of perfect squares summing to `number` (bottom-up DP)."""
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
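# Added quick checks for perfect_squares above: 12 = 4 + 4 + 4 and 13 = 4 + 9.
assert perfect_squares(12) == 3
assert perfect_squares(13) == 2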
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class SCREAMING_SNAKE_CASE (a__ ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = SMALL_MODEL_IDENTIFIER
__A : Optional[Any] = 'pt'
__A : Optional[int] = 'tf'
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = AutoModel.from_pretrained(self.test_model)
model_pt.save_pretrained(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Any = TFAutoModel.from_pretrained(self.test_model , from_pt=_UpperCAmelCase)
model_tf.save_pretrained(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = 'mock_framework'
# Framework provided - return whatever the user provides
__A : int = FeaturesManager.determine_framework(self.test_model , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_UpperCAmelCase)
__A : List[str] = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_UpperCAmelCase)
__A : str = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_UpperCAmelCase)
__A : str = FeaturesManager.determine_framework(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , self.framework_pt)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_UpperCAmelCase)
__A : Union[str, Any] = FeaturesManager.determine_framework(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , self.framework_tf)
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_UpperCAmelCase):
__A : Tuple = FeaturesManager.determine_framework(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = MagicMock(return_value=_UpperCAmelCase)
with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase):
__A : Union[str, Any] = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(_UpperCAmelCase , self.framework_pt)
# PyTorch not in environment -> use TensorFlow
__A : int = MagicMock(return_value=_UpperCAmelCase)
with patch('transformers.onnx.features.is_torch_available' , _UpperCAmelCase):
__A : Any = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(_UpperCAmelCase , self.framework_tf)
# Both in environment -> use PyTorch
__A : List[Any] = MagicMock(return_value=_UpperCAmelCase)
__A : Tuple = MagicMock(return_value=_UpperCAmelCase)
with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase), patch(
'transformers.onnx.features.is_torch_available' , _UpperCAmelCase):
__A : Union[str, Any] = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(_UpperCAmelCase , self.framework_pt)
# Both not in environment -> raise error
__A : Tuple = MagicMock(return_value=_UpperCAmelCase)
__A : Tuple = MagicMock(return_value=_UpperCAmelCase)
with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase), patch(
'transformers.onnx.features.is_torch_available' , _UpperCAmelCase):
with self.assertRaises(_UpperCAmelCase):
__A : Optional[int] = FeaturesManager.determine_framework(self.test_model) | 8 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
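# Added usage sketch for fractional_knapsack above (toy data): items 0 and 1
# fit whole; 2/3 of item 2 fills the remaining capacity of 20.
values, weights = [60, 100, 120], [10, 20, 30]
best, fractions = fractional_knapsack(values, weights, 50)
assert best == 240.0
assert fractions == [1, 1, 2 / 3]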
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a__ )
class SCREAMING_SNAKE_CASE (a__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCAmelCase = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCAmelCase = Features({'''text''': Value('''string''' )} )
lowerCAmelCase = Features({'''summary''': Value('''string''' )} )
lowerCAmelCase = "text"
lowerCAmelCase = "summary"
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"} | 8 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b] (1-indexed), propagating lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> float:
        """Return the maximum over positions [a, b] (1-indexed) in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt) | 8 | 1 |
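
    # Hedged cross-check (added example): after the two range assignments above,
    # the tree's global maximum should match a brute-force scan of a plain list.
    ref = A[:]
    for i in range(0, 3):  # update(1..3, 111), 1-indexed
        ref[i] = 1_11
    for i in range(6, 8):  # update(7..8, 235), 1-indexed
        ref[i] = 2_35
    assert segt.query(1, 1, size, 1, size) == max(ref)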
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Speech2Text feature extractor: extracts Kaldi-style mel filter-bank features from raw speech and optionally
    applies utterance-level cepstral mean and variance normalization (CMVN).
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        # Kaldi expects 16-bit signed integer ranges, so the waveform must not
        # be normalized before feature extraction.
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
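

# Hedged standalone check (added example): utterance-level CMVN should leave
# each mel bin with ~zero mean and unit variance.
if __name__ == "__main__":
    _x = np.random.RandomState(0).randn(100, 80).astype(np.float32)
    _normed = Speech2TextFeatureExtractor.utterance_cmvn(_x, input_length=100)
    assert np.allclose(_normed.mean(axis=0), 0.0, atol=1e-4)
    assert np.allclose(_normed.std(axis=0), 1.0, atol=1e-4)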
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
__A : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def main() -> None:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
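

# Hedged cross-check (added example): the closed form n/2 * (2a + (n - 1)d)
# must agree with a brute-force sum of the arithmetic progression.
def _sum_of_series_demo() -> None:
    a, d, n = 1, 1, 10
    assert sum_of_series(a, d, n) == sum(a + i * d for i in range(n))  # 55.0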
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
'''simple docstring'''
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : int = None
if self.use_input_mask:
__A : Tuple = random_attention_mask([self.batch_size, self.seq_length])
__A : Any = None
__A : List[str] = None
__A : Any = None
if self.use_labels:
__A : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : int = ids_tensor([self.batch_size] , self.num_choices)
__A : Optional[Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFDistilBertModel(config=_UpperCAmelCase)
__A : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
__A : str = model(_UpperCAmelCase)
__A : Any = [input_ids, input_mask]
__A : Tuple = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = TFDistilBertForMaskedLM(config=_UpperCAmelCase)
__A : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = TFDistilBertForQuestionAnswering(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = self.num_labels
__A : Dict = TFDistilBertForSequenceClassification(_UpperCAmelCase)
__A : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
__A : Any = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = self.num_choices
__A : List[str] = TFDistilBertForMultipleChoice(_UpperCAmelCase)
__A : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[str] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Any = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
__A : Tuple = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_labels
__A : List[str] = TFDistilBertForTokenClassification(_UpperCAmelCase)
__A : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
lowerCAmelCase = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
__A : Optional[int] = TFDistilBertModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap `key` (a function mapping an object to a string) to lower-case and strip underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
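

# Hedged behavior sketch (added example): constants sort first, then classes,
# then functions, case- and underscore-insensitively within each group.
def _sort_objects_demo() -> None:
    assert sort_objects(["logging", "CONFIG_MAP", "AutoModel", "_helper"]) == [
        "CONFIG_MAP",
        "AutoModel",
        "_helper",
        "logging",
    ]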
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
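

# Hedged worked example (added): `str_eval` multiplies the digits of its
# argument, e.g. 9 * 9 * 8 * 9 = 5832.
def _str_eval_demo() -> None:
    assert str_eval("9989") == 5832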
'''simple docstring'''
def perfect(number: int) -> bool:
    """Check whether `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
    print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
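
    # Hedged worked example (added): 28 = 1 + 2 + 4 + 7 + 14 is perfect, 27 is not.
    assert perfect(28) and not perfect(27)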
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline: predicts a per-pixel depth map for an input image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
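

# Hedged usage sketch (added; the model id is illustrative):
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL.Image rendering of the depth map
#   result["predicted_depth"]  # raw torch.Tensor prediction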
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into a Transformers checkpoint."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 1 |
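
# Hedged CLI sketch (added; the script filename and paths are illustrative):
#   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json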
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1) for i in range(10)]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
#
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
        __A : int = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        __A : Optional[Any] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        __A : str = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product))
return F'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
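A quick cross-check for the FFT-based product above: polynomial multiplication is a convolution of the coefficient lists, so numpy's O(n^2) convolve should agree with the O(n log n) result.

import numpy as np

# (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3
print(np.convolve([1, 2, 3], [4, 5]))  # [ 4 13 22 15]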
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ : int = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
lowercase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
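For context, `_LazyModule` defers the heavy `modeling_*` imports until an attribute is first touched (in the shipped file the module replaces itself via `sys.modules[__name__]`). A minimal behavioural sketch, assuming transformers with torch is installed:

import importlib

# Importing the package module is cheap; configuration_m2m_100 is only
# imported when M2M100Config is first accessed.
m2m = importlib.import_module('transformers.models.m2m_100')
print(m2m.M2M100Config.model_type)  # m2m_100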
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def _lowerCAmelCase ( ) -> int:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 | 1 |
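Outside the test harness, the same integration path boils down to a few lines. A minimal inference sketch, assuming the hustvl/yolos-small checkpoint and the COCO fixture image are available:

import torch
from PIL import Image

from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained('hustvl/yolos-small')
model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small')

image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)

# Same post-processing call the integration test exercises above.
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label in zip(results['scores'], results['labels']):
    print(f'{model.config.id2label[label.item()]}: {score:.3f}')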
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowercase__ : Any = '''pt'''
elif is_tf_available():
lowercase__ : Tuple = '''tf'''
else:
lowercase__ : Union[str, Any] = '''jax'''
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
    lowerCAmelCase = ByT5Tokenizer
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().setUp()
        __A : Any = ByT5Tokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        return ByT5Tokenizer.from_pretrained('google/byt5-small')
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5):
'''simple docstring'''
__A : Optional[Any] = []
for i in range(len(_UpperCAmelCase)):
try:
__A : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase)
except UnicodeDecodeError:
pass
toks.append((i, tok))
__A : List[str] = list(filter(lambda _UpperCAmelCase: re.match(R'^[ a-zA-Z]+$' , t[1]) , _UpperCAmelCase))
__A : Optional[Any] = list(filter(lambda _UpperCAmelCase: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase) , _UpperCAmelCase))
if max_length is not None and len(_UpperCAmelCase) > max_length:
__A : Optional[int] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase) < min_length and len(_UpperCAmelCase) > 0:
while len(_UpperCAmelCase) < min_length:
__A : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
__A : Tuple = [t[0] for t in toks]
# Ensure consistency
__A : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase)
if " " not in output_txt and len(_UpperCAmelCase) > 1:
__A : Union[str, Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase)
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase)
)
if with_prefix_space:
__A : Dict = ' ' + output_txt
__A : int = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
return output_txt, output_ids
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.ta_base_tokenizer
__A : Tuple = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'])
__A : List[Any] = tokenizer(['hi', 'I went to the gym', ''])
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.ta_base_tokenizer
__A : Any = 'Unicode €.'
__A : Union[str, Any] = tokenizer(_UpperCAmelCase)
__A : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase)
# decoding
__A : List[str] = tokenizer.decode(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , 'Unicode €.</s>')
__A : Any = tokenizer('e è é ê ë')
__A : List[str] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase)
# decoding
__A : Optional[int] = tokenizer.decode(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , 'e è é ê ë</s>')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')) , 'e è é ê ë</s>')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.ta_base_tokenizer
__A : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__A : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__A : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
if FRAMEWORK != "jax":
__A : Optional[Any] = list(batch.input_ids.numpy()[0])
else:
__A : str = list(batch.input_ids.tolist()[0])
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual((2, 37) , batch.input_ids.shape)
self.assertEqual((2, 37) , batch.attention_mask.shape)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.ta_base_tokenizer
__A : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__A : Tuple = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase)
self.assertIn('attention_mask' , _UpperCAmelCase)
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase)
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.ta_base_tokenizer
__A : Any = [
'Summary of the text.',
'Another summary.',
]
__A : Optional[Any] = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
self.assertEqual(32 , targets['input_ids'].shape[1])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.ta_base_tokenizer
__A : Optional[Any] = ['A long paragraph for summarization. </s>']
__A : Dict = ['Summary of the text. </s>']
# fmt: off
__A : str = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__A : List[str] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__A : Any = tokenizer(_UpperCAmelCase , text_target=_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , batch['input_ids'][0])
self.assertEqual(_UpperCAmelCase , batch['labels'][0])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
__A : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
__A : Dict = tempfile.mkdtemp()
__A : Dict = ' He is very happy, UNwant\u00E9d,running'
__A : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Optional[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase)
__A : Dict = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
shutil.rmtree(_UpperCAmelCase)
__A : List[Any] = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
__A : List[str] = tempfile.mkdtemp()
__A : str = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'])
__A : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token')
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
__A : Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = tokenizer.__class__.from_pretrained(_UpperCAmelCase)
__A : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
__A : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase)
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json') , encoding='utf-8') as json_file:
__A : Tuple = json.load(_UpperCAmelCase)
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json') , encoding='utf-8') as json_file:
__A : List[Any] = json.load(_UpperCAmelCase)
__A : str = [F'<extra_id_{i}>' for i in range(125)]
__A : Union[str, Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
__A : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json') , 'w' , encoding='utf-8') as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json') , 'w' , encoding='utf-8') as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__A : Any = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__A : Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase)]
__A : int = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens)
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase)
__A : Optional[Any] = tokenizer_class.from_pretrained(_UpperCAmelCase)
self.assertTrue(tokenizer.decode([255]) == '')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
__A : List[str] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
__A : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
__A : Tuple = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__A : Optional[int] = 0
__A : List[Any] = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase)
for attr in attributes_list:
setattr(_UpperCAmelCase , attr + '_id' , _UpperCAmelCase)
self.assertEqual(getattr(_UpperCAmelCase , _UpperCAmelCase) , _UpperCAmelCase)
self.assertEqual(getattr(_UpperCAmelCase , attr + '_id') , _UpperCAmelCase)
setattr(_UpperCAmelCase , attr + '_id' , _UpperCAmelCase)
self.assertEqual(getattr(_UpperCAmelCase , _UpperCAmelCase) , _UpperCAmelCase)
self.assertEqual(getattr(_UpperCAmelCase , attr + '_id') , _UpperCAmelCase)
setattr(_UpperCAmelCase , 'additional_special_tokens_ids' , [])
self.assertListEqual(getattr(_UpperCAmelCase , 'additional_special_tokens') , [])
self.assertListEqual(getattr(_UpperCAmelCase , 'additional_special_tokens_ids') , [])
setattr(_UpperCAmelCase , 'additional_special_tokens_ids' , [token_id_to_test_setters])
self.assertListEqual(getattr(_UpperCAmelCase , 'additional_special_tokens') , [token_to_test_setters])
self.assertListEqual(getattr(_UpperCAmelCase , 'additional_special_tokens_ids') , [token_id_to_test_setters]) | 8 |
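The expected id lists hard-coded in the tests above follow directly from ByT5's byte-level scheme: each UTF-8 byte maps to byte_value + 3 (ids 0-2 are reserved for pad/</s>/unk), and </s> = 1 is appended. For example:

from transformers import ByT5Tokenizer

tok = ByT5Tokenizer()  # no vocab file needed, as in setUp above
print(tok('hi')['input_ids'])  # [107, 108, 1]: ord('h') + 3, ord('i') + 3, </s>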
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase__ : Dict = {
'''camembert-base''': 5_12,
}
lowercase__ : str = '''▁'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_file
__A : Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        __A : Optional[Any] = [self.cls_token_id]
        __A : Optional[int] = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
return (out_vocab_file,) | 8 | 1 |
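The two pair-building methods above produce the `<s> A </s></s> B </s>` layout CamemBERT inherits from RoBERTa, with all-zero token type ids. A short usage sketch, assuming the camembert-base files can be downloaded:

from transformers import CamembertTokenizerFast

tok = CamembertTokenizerFast.from_pretrained('camembert-base')
enc = tok("J'aime le camembert", 'Moi aussi')
print(tok.convert_ids_to_tokens(enc['input_ids']))
# ['<s>', ..., '</s>', '</s>', ..., '</s>']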
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE (a__ , a__ ):
lowerCAmelCase = '''swin'''
lowerCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=4 , _UpperCAmelCase=3 , _UpperCAmelCase=96 , _UpperCAmelCase=[2, 2, 6, 2] , _UpperCAmelCase=[3, 6, 12, 24] , _UpperCAmelCase=7 , _UpperCAmelCase=4.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
__A : str = image_size
__A : Dict = patch_size
__A : Union[str, Any] = num_channels
__A : str = embed_dim
__A : List[str] = depths
__A : List[Any] = len(_UpperCAmelCase)
__A : Optional[int] = num_heads
__A : Tuple = window_size
__A : Union[str, Any] = mlp_ratio
__A : List[str] = qkv_bias
__A : Dict = hidden_dropout_prob
__A : Dict = attention_probs_dropout_prob
__A : Any = drop_path_rate
__A : List[str] = hidden_act
__A : str = use_absolute_embeddings
__A : Union[str, Any] = layer_norm_eps
__A : str = initializer_range
__A : int = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__A : Dict = int(embed_dim * 2 ** (len(_UpperCAmelCase) - 1))
__A : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(_UpperCAmelCase) + 1)]
__A ,__A : str = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names)
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 1e-4 | 8 |
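The derived hidden_size set near the end of the constructor is embed_dim * 2 ** (num_stages - 1), which is what downstream wrappers such as VisionEncoderDecoderModel read. A quick check with the defaults:

from transformers import SwinConfig

config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
print(config.hidden_size)  # 96 * 2 ** 3 == 768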
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
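In application code the two helpers exercised above differ mainly in failure behaviour: cached_file raises for a missing entry by default, while get_file_from_repo returns None. A sketch, assuming network access to the Hub:

from transformers.utils import cached_file, get_file_from_repo

config_path = cached_file('hf-internal-testing/tiny-random-bert', 'config.json')
maybe_missing = get_file_from_repo('hf-internal-testing/tiny-random-bert', 'ahah.txt')
print(config_path)    # local snapshot path inside the HF cache
print(maybe_missing)  # None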
'''simple docstring'''
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13])) | 8 |
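Tree sort runs in O(n log n) on average but O(n^2) on already-sorted input, since the unbalanced BST above degenerates into a linked list; note also that duplicate values collapse, because insert overwrites self.val on equality. Example:

print(tree_sort([5, 2, 8, 1]))  # [1, 2, 5, 8]
print(tree_sort([3, 3, 1]))     # [1, 3] -- duplicates are dropped
print(tree_sort([]))            # []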
'''simple docstring'''
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version) | 8 | 1 |
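The Fire wrapper makes the function callable from the shell; programmatically it is one call. The checkpoint name below is only an example — any seq2seq config on the Hub works, and extra kwargs override config fields before the fresh weights are built:

# Writes t5-small's config and tokenizer plus freshly initialized (untrained)
# weights to ./t5-small-random, shrinking d_model via the config kwargs.
save_randomly_initialized_version('t5-small', './t5-small-random', d_model=64)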
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : str = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()} | 8 | 1 |
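One constructor detail worth noting: aggregation_labels may arrive with string keys when loaded from JSON, and the final lines above normalize them to int. For example:

from transformers import TapasConfig

config = TapasConfig(
    num_aggregation_labels=4,
    aggregation_labels={'0': 'NONE', '1': 'SUM', '2': 'AVERAGE', '3': 'COUNT'},
)
print(config.aggregation_labels[0])  # NONE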
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE (a__ , a__ ):
@register_to_config
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = ("DownEncoderBlock2D",) , _UpperCAmelCase = ("UpDecoderBlock2D",) , _UpperCAmelCase = (64,) , _UpperCAmelCase = 1 , _UpperCAmelCase = "silu" , _UpperCAmelCase = 3 , _UpperCAmelCase = 32 , _UpperCAmelCase = 256 , _UpperCAmelCase = 32 , _UpperCAmelCase = None , _UpperCAmelCase = 0.18215 , _UpperCAmelCase = "group" , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
__A : Optional[int] = Encoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , down_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , double_z=_UpperCAmelCase , )
__A : Dict = vq_embed_dim if vq_embed_dim is not None else latent_channels
        __A : Union[str, Any] = nn.Conv2d(_UpperCAmelCase , _UpperCAmelCase , 1)
        __A : List[Any] = VectorQuantizer(_UpperCAmelCase , _UpperCAmelCase , beta=0.25 , remap=_UpperCAmelCase , sane_index_shape=_UpperCAmelCase)
        __A : Dict = nn.Conv2d(_UpperCAmelCase , _UpperCAmelCase , 1)
# pass init params to Decoder
__A : Any = Decoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , up_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , norm_type=_UpperCAmelCase , )
@apply_forward_hook
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = True):
'''simple docstring'''
__A : Optional[int] = self.encoder(_UpperCAmelCase)
__A : str = self.quant_conv(_UpperCAmelCase)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_UpperCAmelCase)
@apply_forward_hook
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True):
'''simple docstring'''
if not force_not_quantize:
__A ,__A ,__A : Dict = self.quantize(_UpperCAmelCase)
else:
__A : int = h
__A : List[Any] = self.post_quant_conv(_UpperCAmelCase)
__A : Union[str, Any] = self.decoder(_UpperCAmelCase , quant if self.config.norm_type == 'spatial' else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = True):
'''simple docstring'''
__A : Any = sample
__A : Optional[int] = self.encode(_UpperCAmelCase).latents
__A : Union[str, Any] = self.decode(_UpperCAmelCase).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase) | 8 |
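A toy round-trip through the model above (published in diffusers as VQModel): encode to latents, quantize, decode. The sizes are arbitrary and chosen only so the sketch runs quickly on CPU:

import torch

from diffusers import VQModel

model = VQModel(
    in_channels=3,
    out_channels=3,
    latent_channels=3,
    num_vq_embeddings=16,
    block_out_channels=(32,),
    layers_per_block=1,
)
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    recon = model(x).sample  # encode -> quantize -> decode
print(recon.shape)  # torch.Size([1, 3, 32, 32])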
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize):
'''simple docstring'''
__A : Union[str, Any] = 'bilinear'
__A : int = max_size
__A : Optional[Any] = short_edge_length
def __call__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = []
for img in imgs:
__A ,__A : Dict = img.shape[:2]
# later: provide list and randomly choose index for resize
__A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase)
if h < w:
__A ,__A : Optional[Any] = size, scale * w
else:
__A ,__A : Optional[Any] = scale * h, size
if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size:
__A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = newh * scale
__A : Dict = neww * scale
__A : Dict = int(neww + 0.5)
__A : Optional[int] = int(newh + 0.5)
            if img.dtype == np.uint8:
__A : int = Image.fromarray(_UpperCAmelCase)
__A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__A : Dict = np.asarray(_UpperCAmelCase)
else:
__A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw
__A : Dict = nn.functional.interpolate(
_UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0)
img_augs.append(_UpperCAmelCase)
return img_augs
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
__A : List[Any] = cfg.INPUT.FORMAT
__A : Dict = cfg.SIZE_DIVISIBILITY
__A : str = cfg.PAD_VALUE
__A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
__A : int = cfg.MODEL.DEVICE
__A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images]))
__A : Dict = [im.shape[-2:] for im in images]
__A : Optional[int] = [
nn.functional.pad(
_UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_UpperCAmelCase , _UpperCAmelCase)
]
return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : int = [images]
if single_image:
assert len(_UpperCAmelCase) == 1
for i in range(len(_UpperCAmelCase)):
if isinstance(images[i] , torch.Tensor):
images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
_UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
__A : str = torch.tensor([im.shape[:2] for im in images])
__A : List[str] = self.aug(_UpperCAmelCase)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__A : Any = [self.normalizer(_UpperCAmelCase) for x in images]
# now pad them to do the following operations
__A ,__A : Any = self.pad(_UpperCAmelCase)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h) | 8 | 1 |
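A tiny demonstration of the two box helpers at the end of the snippet, with made-up (x0, y0, x1, y1) coordinates and per-image (scale_y, scale_x) factors:

import torch

boxes = torch.tensor([[10.0, 20.0, 120.0, 220.0]])
scales = torch.tensor([[0.5, 0.5]])
scaled = _scale_box(boxes.clone(), scales)
_clip_box(scaled, (100, 100))  # clamp in place to a 100x100 image
print(scaled)  # tensor([[  5.,  10.,  60., 100.]])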