code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
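# A usage note (illustrative, not part of the original test file): tests decorated
# with @slow are skipped by default in the transformers suite and are enabled via
# the RUN_SLOW environment variable, e.g.
#
#   RUN_SLOW=1 python -m pytest path/to/this_test.py
#
# The test path above is a placeholder.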
| code_codestyle: 79 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
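# --- Illustrative aside (not from the transformers codebase): a minimal sketch of
# the lazy-import idea _LazyModule implements above. The module in sys.modules is
# replaced by a proxy whose __getattr__ runs the real import on first attribute
# access. All names below are made up for illustration.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the real import only happens here, on first access
        module = importlib.import_module(f".{self._attr_to_submodule[attr]}", self.__name__)
        return getattr(module, attr)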
| style_context_codestyle: 20 | label: 0 |
def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return the given decimal as a (numerator, denominator) fraction in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction using the Euclidean algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError: not a valid number
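# For reference, the expected results of the calls above (computed by hand, not
# part of the original snippet):
#   decimal_to_fraction(2)      -> (2, 1)
#   decimal_to_fraction(89.0)   -> (89, 1)
#   decimal_to_fraction("67")   -> (67, 1)
#   decimal_to_fraction("45.0") -> (45, 1)
#   decimal_to_fraction(1.5)    -> (3, 2)
#   decimal_to_fraction("6.25") -> (25, 4)
#   decimal_to_fraction("78td") -> raises ValueError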
| code_codestyle: 720 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 53 | label: 0 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
SCREAMING_SNAKE_CASE_ = True
except (ImportError, ModuleNotFoundError):
SCREAMING_SNAKE_CASE_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def lowercase__ ( lowerCAmelCase : str ) -> str:
"""simple docstring"""
re.sub('<n>' , '' , lowerCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowerCAmelCase ) )
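# Illustrative usage (not part of the original module), assuming the punkt model
# downloaded above is available:
if __name__ == "__main__":
    sample = "Pegasus uses <n> as a newline marker. This helper splits sentences."
    print(add_newline_to_end_of_each_sentence(sample))
    # expected: the two sentences printed on separate lines, with "<n>" removed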
| code_codestyle: 373 |
"""simple docstring"""
from math import ceil, sqrt
def lowercase__ ( lowerCAmelCase : int = 1_000_000 ) -> int:
"""simple docstring"""
UpperCAmelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
UpperCAmelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
UpperCAmelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F'{solution() = }')
| style_context_codestyle: 373 | label: 1 |
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| code_codestyle: 164 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
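# Shape note (illustrative): for hidden size H the fused projection has
# in_proj_weight of shape (3*H, H) and in_proj_bias of shape (3*H,); the three
# H-row slices above are the query, key and value parameters, in that order.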
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase : int = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| style_context_codestyle: 164 | label: 1 |
""" Testing suite for the PyTorch SegFormer model. """
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| code_codestyle: 42 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
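# Illustrative output (not in the original file): print_pascal_triangle(3) prints
#
#   1
#  1 1
# 1 2 1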
| style_context_codestyle: 323 | label: 0 |
"""simple docstring"""
from __future__ import annotations
a : List[Any] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _SCREAMING_SNAKE_CASE ( _lowercase : list[list[int]] , _lowercase : list[int] , _lowercase : list[int] , _lowercase : int , _lowercase : list[list[int]] , ) ->List[Any]:
'''simple docstring'''
a : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
a : List[str] = 1
a : Union[str, Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
a : List[Any] = init[0]
a : str = init[1]
a : int = 0
a : Optional[int] = g + heuristic[x][y] # cost from starting cell to destination cell
a : int = [[f, g, x, y]]
a : Union[str, Any] = False # flag that is set when search is complete
a : int = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
a : Dict = cell.pop()
a : str = next_cell[2]
a : List[str] = next_cell[3]
a : List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
a : Optional[int] = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
a : Dict = x + DIRECTIONS[i][0]
a : Dict = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
a : List[str] = g + cost
a : str = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
a : str = 1
a : Any = i
a : List[Any] = []
a : List[str] = goal[0]
a : List[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
a : Any = x - DIRECTIONS[action[x][y]][0]
a : List[Any] = y - DIRECTIONS[action[x][y]][1]
a : Optional[int] = xa
a : int = ya
invpath.append([x, y] )
a : int = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
a : Optional[Any] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
a : Optional[int] = [0, 0]
# all coordinates are given in format [y,x]
a : Tuple = [len(grid) - 1, len(grid[0]) - 1]
a : Any = 1
# the cost map which pushes the path closer to the goal
a : Any = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
a : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
a : int = 99
a , a : int = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| code_codestyle: 706 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
a : Optional[Any] = datasets.utils.logging.get_logger(__name__)
a : Union[str, Any] = ['''names''', '''prefix''']
a : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
a : Any = ['''encoding_errors''', '''on_bad_lines''']
a : List[str] = ['''date_format''']
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
lowerCamelCase : str =","
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[Union[int, List[int], str]] ="infer"
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] =None
lowerCamelCase : Optional[Union[List[int], List[str]]] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] =None
lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : bool =False
lowerCamelCase : Optional[Union[int, List[int]]] =None
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[Union[str, List[str]]] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : bool =True
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ="."
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ='"'
lowerCamelCase : int =0
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : int =0
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : Optional[str] =None
lowerCamelCase : int =1_0000
lowerCamelCase : Optional[datasets.Features] =None
lowerCamelCase : Optional[str] ="strict"
lowerCamelCase : Literal["error", "warn", "skip"] ="error"
lowerCamelCase : Optional[str] =None
def __a ( self ) -> Dict:
if self.delimiter is not None:
a : int = self.delimiter
if self.column_names is not None:
a : Any = self.column_names
@property
def __a ( self ) -> List[str]:
a : Dict = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
lowerCamelCase : Union[str, Any] =CsvConfig
def __a ( self ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
a : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
a : Tuple = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Tuple = [files]
a : int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
a : int = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Any = [files]
a : List[str] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def __a ( self , lowerCAmelCase__ ) -> pa.Table:
if self.config.features is not None:
a : Optional[Any] = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
a : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
a : Union[str, Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def __a ( self , lowerCAmelCase__ ) -> Any:
a : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
a : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
a : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
a : Any = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
raise
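# Illustrative usage (not part of the original module): this builder is normally
# reached through datasets.load_dataset rather than instantiated directly, e.g.
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
#
# Keyword arguments such as sep are captured by CsvConfig and forwarded to
# pandas.read_csv via the pd_read_csv_kwargs property above.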
| style_context_codestyle: 31 | label: 0 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def snake_case_ ( __snake_case : List[Any] , __snake_case : str=False) -> Tuple:
try:
lowerCAmelCase_ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowerCAmelCase_ = default
else:
# KEY is set, convert it to True or False.
try:
lowerCAmelCase_ = strtobool(__snake_case)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''')
return _value
A_ : Optional[Any] =parse_flag_from_env('''RUN_SLOW''', default=False)
A_ : Optional[int] =parse_flag_from_env('''RUN_REMOTE''', default=False)
A_ : Any =parse_flag_from_env('''RUN_LOCAL''', default=True)
A_ : List[str] =parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
A_ : Tuple =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
A_ : int =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
A_ : List[str] =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
A_ : Optional[int] =pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
A_ : Any =pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
A_ : Dict =pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
A_ : Dict =pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def snake_case_ ( __snake_case : Optional[Any]) -> int:
try:
import faiss # noqa
except ImportError:
lowerCAmelCase_ = unittest.skip('''test requires faiss''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : List[str]) -> str:
try:
import regex # noqa
except ImportError:
lowerCAmelCase_ = unittest.skip('''test requires regex''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : Any) -> int:
try:
import elasticsearch # noqa
except ImportError:
lowerCAmelCase_ = unittest.skip('''test requires elasticsearch''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : Optional[Any]) -> Tuple:
try:
import sqlalchemy # noqa
except ImportError:
lowerCAmelCase_ = unittest.skip('''test requires sqlalchemy''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : int) -> Union[str, Any]:
if not config.TORCH_AVAILABLE:
lowerCAmelCase_ = unittest.skip('''test requires PyTorch''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : Optional[int]) -> List[Any]:
if not config.TF_AVAILABLE:
lowerCAmelCase_ = unittest.skip('''test requires TensorFlow''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : Optional[Any]) -> List[str]:
if not config.JAX_AVAILABLE:
lowerCAmelCase_ = unittest.skip('''test requires JAX''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : Dict) -> Tuple:
if not config.PIL_AVAILABLE:
lowerCAmelCase_ = unittest.skip('''test requires Pillow''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : Optional[int]) -> Tuple:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''')(__snake_case)
else:
return test_case
def snake_case_ ( __snake_case : List[Any]) -> Union[str, Any]:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''')(__snake_case)
else:
return test_case
def snake_case_ ( __snake_case : Any) -> str:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''')(__snake_case)
else:
return test_case
def snake_case_ ( __snake_case : Dict) -> Union[str, Any]:
def _require_spacy_model(__snake_case : Optional[int]):
try:
import spacy # noqa F401
spacy.load(__snake_case)
except ImportError:
return unittest.skip('''test requires spacy''')(__snake_case)
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(__snake_case))(__snake_case)
else:
return test_case
return _require_spacy_model
def snake_case_ ( __snake_case : Optional[int]) -> Optional[Any]:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''')(__snake_case)
else:
return test_case
def snake_case_ ( __snake_case : List[str]) -> Union[str, Any]:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''')(__snake_case)
else:
return test_case
def snake_case_ ( __snake_case : Optional[int]) -> Optional[int]:
if not _run_slow_tests or _run_slow_tests == 0:
lowerCAmelCase_ = unittest.skip('''test is slow''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : Optional[Any]) -> List[str]:
if not _run_local_tests or _run_local_tests == 0:
lowerCAmelCase_ = unittest.skip('''test is local''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : Union[str, Any]) -> Any:
if not _run_packaged_tests or _run_packaged_tests == 0:
lowerCAmelCase_ = unittest.skip('''test is packaged''')(__snake_case)
return test_case
def snake_case_ ( __snake_case : List[str]) -> Optional[int]:
if not _run_remote_tests or _run_remote_tests == 0:
lowerCAmelCase_ = unittest.skip('''test requires remote''')(__snake_case)
return test_case
def snake_case_ ( *__snake_case : List[Any]) -> Optional[int]:
def decorate(cls : Optional[int]):
for name, fn in cls.__dict__.items():
if callable(__snake_case) and name.startswith('''test'''):
for decorator in decorators:
lowerCAmelCase_ = decorator(__snake_case)
setattr(cls , __snake_case , __snake_case)
return cls
return decorate
class __UpperCAmelCase ( __a ):
pass
class __UpperCAmelCase ( __a ):
__A : List[str] = 0
__A : Optional[int] = 1
__A : List[Any] = 2
@contextmanager
def snake_case_ ( __snake_case : Optional[Any]=OfflineSimulationMode.CONNECTION_FAILS , __snake_case : Optional[Any]=1E-1_6) -> str:
lowerCAmelCase_ = requests.Session().request
def timeout_request(__snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Tuple , **__snake_case : int):
# Change the url to an invalid url so that the connection hangs
lowerCAmelCase_ = '''https://10.255.255.1'''
if kwargs.get('''timeout''') is None:
raise RequestWouldHangIndefinitelyError(
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
lowerCAmelCase_ = timeout
try:
return online_request(__snake_case , __snake_case , **__snake_case)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
lowerCAmelCase_ = url
lowerCAmelCase_ = e.args[0]
lowerCAmelCase_ = (max_retry_error.args[0].replace('''10.255.255.1''' , F'''OfflineMock[{url}]'''),)
lowerCAmelCase_ = (max_retry_error,)
raise
def raise_connection_error(__snake_case : Dict , __snake_case : str , **__snake_case : Union[str, Any]):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=__snake_case)
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , __snake_case):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , __snake_case):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , __snake_case):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''')
@contextmanager
def snake_case_ ( *__snake_case : Any , **__snake_case : Optional[int]) -> int:
lowerCAmelCase_ = str(Path().resolve())
with tempfile.TemporaryDirectory(*__snake_case , **__snake_case) as tmp_dir:
try:
os.chdir(__snake_case)
yield
finally:
os.chdir(__snake_case)
@contextmanager
def snake_case_ ( ) -> Tuple:
import gc
gc.collect()
lowerCAmelCase_ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def snake_case_ ( ) -> Dict:
import gc
gc.collect()
lowerCAmelCase_ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def snake_case_ ( __snake_case : Union[str, Any] , __snake_case : str) -> Any:
return deepcopy(__snake_case).integers(0 , 100 , 10).tolist() == deepcopy(__snake_case).integers(0 , 100 , 10).tolist()
def snake_case_ ( __snake_case : Dict) -> Dict:
import decorator
from requests.exceptions import HTTPError
def _wrapper(__snake_case : Tuple , *__snake_case : int , **__snake_case : List[str]):
try:
return func(*__snake_case , **__snake_case)
except HTTPError as err:
if str(__snake_case).startswith('''500''') or str(__snake_case).startswith('''502'''):
pytest.xfail(str(__snake_case))
raise err
return decorator.decorator(_wrapper , __snake_case)
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = returncode
lowerCAmelCase_ = stdout
lowerCAmelCase_ = stderr
async def snake_case_ ( __snake_case : str , __snake_case : Tuple) -> List[Any]:
while True:
lowerCAmelCase_ = await stream.readline()
if line:
callback(__snake_case)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
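# Illustrative call (my own sketch; the inline command is hypothetical): run a worker
# script in a fresh subprocess and let the helper raise if it fails or stays silent.
def _example_execute_subprocess():  # hypothetical demo helper
    result = execute_subprocess_async(["python", "-c", "print('hello from worker')"])
    assert result.returncode == 0
    assert "hello from worker" in result.stdout[0]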
def pytest_xdist_worker_id():
    # returns the numerical id of the current `pytest-xdist` worker ("gw2" -> 2),
    # and 0 when `pytest-xdist` isn't being used
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    # offset the default torch.distributed port (29500) by the xdist worker id so that
    # concurrent test workers don't collide on the same port
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 274 |
"""Convert DETR checkpoints with native (Transformers) backbone."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight'''))
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight'''))
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias'''))
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean'''))
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var'''))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
))
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
))
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
))
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
))
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
))
# 3 convs
for i in range(3):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
))
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
))
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
))
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
))
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias'''))
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
))
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
])
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
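# Standalone illustration (my own sketch, not part of the conversion script): PyTorch's
# nn.MultiheadAttention stores the query/key/value projections as one fused (3*d, d)
# in_proj matrix; the loop above slices it into the three (d, d) blocks that the
# Transformers implementation expects.
def _example_split_fused_qkv():  # hypothetical demo helper
    d = 256
    fused = torch.randn(3 * d, d)
    q, k, v = fused[:d, :], fused[d : 2 * d, :], fused[-d:, :]
    assert q.shape == k.shape == v.shape == (d, d)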
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 274 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
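# For example (launch commands assumed from the standard Accelerate workflow; the
# script name below is a placeholder):
#
#   accelerate config                      # answer the interactive questions once
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2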
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 55 |
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """Fundamental transformation/operation that'll be performed on every bit."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
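# Note (added for clarity, not in the original): since 128 + level + (c - 128) == c + level,
# the transform simply shifts every channel value by `level`; PIL clamps the lookup-table
# result to the valid 0-255 range when applying the point operation.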
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png') | 55 | 1 |
"""simple docstring"""
import math
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> bool:
"""simple docstring"""
return math.sqrt(UpperCAmelCase_ ) * math.sqrt(UpperCAmelCase_ ) == num
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> bool:
"""simple docstring"""
A__ = 0
A__ = n
while left <= right:
A__ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
A__ = mid - 1
else:
A__ = mid + 1
return False
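# Cross-check sketch (my own addition, not in the original): both implementations
# should agree on small inputs.
def _example_cross_check():  # hypothetical demo helper
    for value in (0, 1, 2, 16, 17, 25, 26):
        assert perfect_square(value) == perfect_square_binary_search(value)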
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 104 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """
    Page replacement algorithm: Least Recently Used (LRU) caching.
    """

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and map for the keys.
        The LRUCache is set to the size n.
        """
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """
        Looks for a page in the cache store and adds a reference to the set.
        Removes the least recently used key if the store is full.
        """
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """
        Prints all the elements in the store.
        """
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
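# Complexity note (added, not in the original): `deque.remove` on a cache hit makes
# `refer` O(n) in the worst case; an `OrderedDict` (or a dict plus doubly linked list)
# would give amortized O(1) per operation at the cost of a longer implementation.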
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 108 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
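# Migration sketch (added for clarity, mirroring the deprecation message above):
# downstream code should import the pipeline from the package root instead, e.g.
#
#   from diffusers import StableDiffusionControlNetPipeline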
| 108 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search on the list. Returns -1 if the element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
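# Quick sanity check (my own addition, not in the original): both variants locate an
# element in a sorted list, falling back to linear search once the search window is
# smaller than `precision`.
def _example_ternary_search():  # hypothetical demo helper
    data = list(range(0, 100, 3))
    assert ite_ternary_search(data, 27) == data.index(27)
    assert rec_ternary_search(0, len(data) - 1, data, 27) == data.index(27)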
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 171 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds root from the 'starting_point' onwards by Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
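# Update rule used above (note added for clarity): for a root of multiplicity m,
#   x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# which restores the quadratic convergence that plain Newton-Raphson (m = 1) loses
# on repeated roots.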
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 406 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `SCREAMING_SNAKE_CASE` above holds the expected-encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 406 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
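# Example invocation (a sketch; the script filename and output folder here are
# hypothetical, only the two model names above are supported):
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub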
| 82 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
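# Usage sketch (the filename is hypothetical; requires `requests`, `beautifulsoup4`
# and `fake-useragent` to be installed):
#
#   python google_search.py "hugging face transformers"
#
# The script then opens the first organic Google result in the default browser.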
| 101 | 0 |
_UpperCAmelCase = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
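# Minimal usage sketch of the API re-exported above (illustrative only; `model`,
# `optimizer` and `dataloader` are placeholders the caller must define):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()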
| 297 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        # Create dummy source/target files with repeated lines for each split
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus, distributed_retriever="pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
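# To run these tests locally (a sketch; this is the usual location of the file
# in the transformers repo and may differ):
#
#   pytest examples/research_projects/rag/_test_finetune_rag.py -k finetune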
| 297 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
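    # Worked example for the shape check above, using the tester defaults:
    # image_size=32 and patch_size=2 give (32 // 2) ** 2 = 256 patches; with
    # depths=[1, 2, 1] there are two stages of downsampling, so the sequence
    # length is 256 // 4 ** 2 = 16 and the final dim is 16 * 2 ** 2 = 64.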
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # note: `assertTrue` with two arguments would silently pass; this is an equality check
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 337 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
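# Usage sketch (the script and argument names below are illustrative; the target
# script must expose a `_mp_fn` entry point):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased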
if __name__ == "__main__":
    main()
| 337 | 1 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # swap the node with its smallest child, in both the heap and
                # the positions list, and keep node_position in sync
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
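# Worked example: for the weighted triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), i.e.
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
# prisms_algorithm returns the MST edges [(0, 1), (1, 2)] with total weight 3.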
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowerCAmelCase : Any = int(input("Enter number of edges: ").strip())
__lowerCAmelCase : Any = defaultdict(list)
for _ in range(edges_number):
__lowerCAmelCase : str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 719 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
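# Usage sketch for one of the utilities exported above (the starting batch size
# and the body of `train` are placeholders):
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # on a CUDA out-of-memory error, the decorator halves batch_size and retries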
| 284 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase__ ( lowerCamelCase : list[list[float]] ) -> list[list[float]]:
lowerCAmelCase__ : List[str] = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowerCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase__ : Tuple = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase__ : Optional[Any] = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = matrix[1][1], matrix[0][0]
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowerCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowerCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase__ : Union[str, Any] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
lowerCAmelCase__ : int = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase__ : List[str] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase__ : Any = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase__ : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase__ : str = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase__ : List[str] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase__ : Any = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase__ : List[str] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase__ : int = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase__ : int = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase__ : List[str] = array(lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase__ : Dict = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase__ : str = array(lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowerCamelCase )
# Calculate the inverse of the matrix
return [[float(d(lowerCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 308 |
"""simple docstring"""
def lowercase__ ( lowerCamelCase : int , lowerCamelCase : int ) -> int:
return int(input_a == input_a == 0 )
def lowercase__ ( ) -> None:
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(F"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(F"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(F"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 308 | 1 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
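# Usage sketch (downloads weights from the Hub on first call; the checkpoint
# name is the usual public one and is an assumption here):
#
#   from transformers import AutoTokenizer, TFMT5Model
#
#   tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#   model = TFMT5Model.from_pretrained("google/mt5-small")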
| 647 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        # (target key names follow the HF ViT naming)
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
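# Example invocation (a sketch; the script filename and output folder are
# illustrative):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small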
| 647 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
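# Instantiation sketch (kwargs are illustrative; the defaults above presumably
# match the abeja/gpt-neox-japanese-2.7b checkpoint referenced in the archive map):
#
#   config = GPTNeoXJapaneseConfig()
#   small_config = GPTNeoXJapaneseConfig(num_hidden_layers=4, hidden_size=256)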
| 168 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
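# Usage sketch (the checkpoint name is the usual public MobileViT one and is an
# assumption here; "cat.png" is a placeholder):
#
#   from PIL import Image
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")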
| 595 | 0 |
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, either
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
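# Worked example: complete_graph(3) returns {0: [1, 2], 1: [0, 2], 2: [0, 1]},
# and random_graph(3, 1) returns the same complete graph regardless of the seed.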
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rotation_mat = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_mat, (rows, cols))
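# Note: cv2.getAffineTransform derives the unique 2x3 affine matrix that maps
# the three source points in pt1 onto the three destination points in pt2;
# warpAffine then applies that matrix to every pixel of the image.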
if __name__ == "__main__":
# read original image
__lowerCamelCase : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
__lowerCamelCase : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowerCamelCase , __lowerCamelCase : Any = gray_img.shape
# set different points to rotate image
__lowerCamelCase : Optional[Any] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__lowerCamelCase : Any = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__lowerCamelCase : int = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__lowerCamelCase : List[Any] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__lowerCamelCase : int = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowerCamelCase : Optional[Any] = plt.figure(1)
__lowerCamelCase : Tuple = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show()
| 271 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig matching the given checkpoint name."""
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on the config according to the model variant."""
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """Map an original checkpoint key to its Hugging Face counterpart."""
    if "encoder." in name:
        name = name.replace("encoder." , "")
    if "cls_token" in name:
        name = name.replace("cls_token" , "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks" , "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks" , "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn" , "attention.self")
    if "attn" in name:
        name = name.replace("attn" , "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight" , "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias" , "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head" , "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite the original state dict, splitting fused qkv weights.

    The query/key/value target key names below follow the upstream VideoMAE
    conversion script."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder." , "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_video():
    """Load a short sample video (as a list of frames) for sanity checking."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Copy/paste/tweak the original checkpoint weights into the HF VideoMAE structure."""
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCAmelCase__ = torch.Size([1, 174] )
lowerCAmelCase__ = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCAmelCase__ = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCAmelCase__ = torch.Size([1, 174] )
lowerCAmelCase__ = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCAmelCase__ = torch.Size([1, 174] )
lowerCAmelCase__ = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCAmelCase__ = outputs.loss
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(lowerCAmelCase_ , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
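    # Example invocation (local paths are illustrative placeholders):
    #   python convert_videomae_to_pytorch.py \
    #       --checkpoint_url "<google-drive-download-url>" \
    #       --pytorch_dump_folder_path ./videomae-base \
    #       --model_name videomae-base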
| 61 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
UpperCamelCase__ = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # schedulers without sigmas are not supported here
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy")
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png")
        image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy")
        assert np.abs((expected_image - image).max()) < 5e-2
| 468 | 0 |
snake_case = """Tobias Carryer"""
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator: X_{n+1} = (a * X_n + c) mod m."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
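    # Reproducibility sketch: two generators with identical parameters and seed
    # produce identical streams (the seed value 42 is arbitrary).
    #   a = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, 42)
    #   b = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, 42)
    #   assert [a.next_number() for _ in range(5)] == [b.next_number() for _ in range(5)]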
| 488 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 488 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the original fairseq weights into the Transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
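    # Example invocation (paths are illustrative placeholders):
    #   python convert_unispeech_original_checkpoint_to_pytorch.py \
    #       --checkpoint_path /path/to/unispeech.pt \
    #       --dict_path /path/to/dict \
    #       --pytorch_dump_folder_path ./unispeech-converted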
| 454 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """A simple FIFO queue used for the level-order traversal."""

    def __init__(self):
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """A node of an AVL tree."""

    def __init__(self, data):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """An AVL tree keeps itself balanced under insertions and deletions."""

    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self):
        return get_height(self.root)

    def insert(self, data):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 454 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
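# Usage sketch: with the lazy module in place, importing a symbol only loads the
# submodule that defines it, e.g.
#
#   from transformers import MobileViTConfig
#   config = MobileViTConfig()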
| 314 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """A weighted undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int):
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Run Prim's algorithm and return a minimum spanning subgraph."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: total saving of the minimum spanning network."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edgea: int
    edgeb: int
    with open(filepath) as f:
        data = f.read().strip().split('\n')
    adjaceny_matrix = [line.split(',') for line in data]
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])
    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 314 | 1 |
'''simple docstring'''
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
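    # Usage sketch:
    #   snake_to_camel_case("some_random_string")                  -> "someRandomString"
    #   snake_to_camel_case("some_random_string", use_pascal=True) -> "SomeRandomString"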
| 50 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # image is expected to carry the question(s) already (dict, list of dicts, generator, ...)
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 650 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video URL via downloadgram and return the video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.") | 304 | """simple docstring"""
class DisjointSet:
    '''simple docstring'''

    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using the union-by-rank heuristic;
        return True if the merge happened.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 304 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
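# Usage sketch (the checkpoint id is illustrative of the TrOCR family):
#
#   from transformers import TrOCRProcessor
#   processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")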
| 181 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not trusted, loading this dynamic image processor should fail.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False)
        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True)
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case, trust_remote_code=snake_case)
self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
try:
AutoConfig.register('custom', snake_case)
AutoImageProcessor.register(snake_case, snake_case)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case):
AutoImageProcessor.register(snake_case, snake_case)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =CustomImageProcessor.from_pretrained(snake_case)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self :str):
"""simple docstring"""
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] =True
try:
AutoConfig.register('custom', snake_case)
AutoImageProcessor.register(snake_case, snake_case)
# If remote code is not set, the default is to use local
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(not hasattr(snake_case, 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 181 | 1 |
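# The registration tests above exercise the public AutoConfig.register /
# AutoImageProcessor.register pairing. A minimal sketch of that flow; the
# MyCustom* classes are illustrative stand-ins with their bodies elided.
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor


class MyCustomConfig(PretrainedConfig):
    model_type = "my-custom"


class MyCustomImageProcessor(BaseImageProcessor):
    pass


AutoConfig.register("my-custom", MyCustomConfig)
AutoImageProcessor.register(MyCustomConfig, MyCustomImageProcessor)
# From here on, AutoImageProcessor.from_pretrained(...) resolves
# MyCustomImageProcessor for any checkpoint whose config has
# model_type == "my-custom".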
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
def UpperCamelCase ( lowercase_ ) -> Dict:
'''simple docstring'''
lowercase__ : str = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase__ : int = 1_28
elif "12-12" in model_name:
lowercase__ : Any = 12
lowercase__ : List[str] = 12
elif "14-14" in model_name:
lowercase__ : int = 14
lowercase__ : Dict = 14
elif "16-16" in model_name:
lowercase__ : Tuple = 16
lowercase__ : List[str] = 16
else:
raise ValueError("""Model not supported""" )
lowercase__ : Union[str, Any] = """huggingface/label-files"""
if "speech-commands" in model_name:
lowercase__ : List[Any] = 35
lowercase__ : Optional[Any] = """speech-commands-v2-id2label.json"""
else:
lowercase__ : Union[str, Any] = 5_27
lowercase__ : Tuple = """audioset-id2label.json"""
lowercase__ : Tuple = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase__ : str = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase__ : Any = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
if "module.v" in name:
lowercase__ : List[Any] = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
lowercase__ : Optional[Any] = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
lowercase__ : List[Any] = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowercase__ : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
lowercase__ : Optional[int] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase__ : Optional[int] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase__ : str = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ : List[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase__ : List[Any] = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
lowercase__ : List[Any] = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
lowercase__ : Dict = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase__ : Tuple = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
lowercase__ : Dict = key.split(""".""" )
lowercase__ : str = int(key_split[3] )
lowercase__ : Tuple = config.hidden_size
if "weight" in key:
lowercase__ : Tuple = val[:dim, :]
lowercase__ : str = val[dim : dim * 2, :]
lowercase__ : Dict = val[-dim:, :]
else:
lowercase__ : Optional[int] = val[:dim]
lowercase__ : List[str] = val[dim : dim * 2]
lowercase__ : str = val[-dim:]
else:
lowercase__ : str = val
return orig_state_dict
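# The "qkv" branch above slices a fused query/key/value projection into three
# equal blocks. A toy illustration of that slicing (hypothetical shapes; real
# checkpoints use config.hidden_size):
import torch

dim = 4
fused_qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

query_weight = fused_qkv_weight[:dim, :]         # first third  -> query
key_weight = fused_qkv_weight[dim : dim * 2, :]  # middle third -> key
value_weight = fused_qkv_weight[-dim:, :]        # last third   -> value

assert query_weight.shape == key_weight.shape == value_weight.shape == (dim, dim)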
def UpperCamelCase ( lowercase_ ) -> Tuple:
'''simple docstring'''
lowercase__ : Dict = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
@torch.no_grad()
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=False ) -> Tuple:
'''simple docstring'''
lowercase__ : str = get_audio_spectrogram_transformer_config(lowercase_ )
lowercase__ : str = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
lowercase__ : List[str] = model_name_to_url[model_name]
lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" )
# remove some keys
remove_keys(lowercase_ )
# rename some keys
lowercase__ : List[str] = convert_state_dict(lowercase_ , lowercase_ )
# load 🤗 model
lowercase__ : int = ASTForAudioClassification(lowercase_ )
model.eval()
model.load_state_dict(lowercase_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase__ : str = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978
lowercase__ : Tuple = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526
lowercase__ : Dict = 10_24 if """speech-commands""" not in model_name else 1_28
lowercase__ : List[str] = ASTFeatureExtractor(mean=lowercase_ , std=lowercase_ , max_length=lowercase_ )
if "speech-commands" in model_name:
lowercase__ : Dict = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
lowercase__ : Tuple = dataset[0]["""audio"""]["""array"""]
else:
lowercase__ : Dict = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
lowercase__ : int = torchaudio.load(lowercase_ )
lowercase__ : int = waveform.squeeze().numpy()
lowercase__ : str = feature_extractor(lowercase_ , sampling_rate=1_60_00 , return_tensors="""pt""" )
# forward pass
lowercase__ : Optional[Any] = model(**lowercase_ )
lowercase__ : List[str] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase__ : List[Any] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase__ : List[Any] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase__ : Tuple = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase__ : Union[str, Any] = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase__ : List[Any] = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase__ : int = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase__ : int = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase_ )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(lowercase_ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ : Any = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 721 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the largest product of 13 adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 495 | 0 |
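# An equivalent sliding-window formulation of solution() above, handy for
# cross-checking the brute force (assumes the reconstructed N and solution
# names above; math.prod requires Python 3.8+):
import math


def solution_prod(n: str = N) -> int:
    return max(math.prod(int(d) for d in n[i : i + 13]) for i in range(len(n) - 12))


# Both implementations agree: solution_prod() == solution()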
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = mock.Mock()
__snake_case = 500
__snake_case = {}
__snake_case = HTTPError
__snake_case = {}
# Download this model to make sure it's in the cache.
__snake_case = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=a_ ) as mock_head:
__snake_case = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = mock.Mock()
__snake_case = 500
__snake_case = {}
__snake_case = HTTPError
__snake_case = {}
# Download this model to make sure it's in the cache.
__snake_case = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=a_ ) as mock_head:
__snake_case = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def A ( self : Optional[Any] ):
"""simple docstring"""
try:
__snake_case = tempfile.mktemp()
with open(a_ , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , a_ )
__snake_case = AlbertTokenizer.from_pretrained(a_ )
finally:
os.remove(a_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , a_ )
__snake_case = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def A ( self : str ):
"""simple docstring"""
__snake_case = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def A ( cls : List[Any] ):
"""simple docstring"""
__snake_case = TOKEN
HfFolder.save_token(a_ )
@classmethod
def A ( cls : List[Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def A ( self : int ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(a_ , "vocab.txt" )
with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__snake_case = BertTokenizer(a_ )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
__snake_case = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a_ , repo_id="test-tokenizer" , push_to_hub=a_ , use_auth_token=self._token )
__snake_case = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def A ( self : int ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(a_ , "vocab.txt" )
with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__snake_case = BertTokenizer(a_ )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
__snake_case = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a_ , repo_id="valid_org/test-tokenizer-org" , push_to_hub=a_ , use_auth_token=self._token )
__snake_case = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def A ( self : List[str] ):
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(a_ , "vocab.txt" )
with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__snake_case = CustomTokenizer(a_ )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
__snake_case = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(a_ , "vocab.txt" )
with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__snake_case = BertTokenizerFast.from_pretrained(a_ )
bert_tokenizer.save_pretrained(a_ )
__snake_case = CustomTokenizerFast.from_pretrained(a_ )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
__snake_case = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
__snake_case = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=a_ , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def A ( self : str ):
"""simple docstring"""
__snake_case = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def A ( self : str ):
"""simple docstring"""
__snake_case = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def A ( self : Any ):
"""simple docstring"""
__snake_case = Trie()
__snake_case = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(a_ , ["AB", "C"] )
| 69 |
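# The Trie tests above pin down the longest-match splitting behavior of
# transformers.tokenization_utils.Trie. A standalone demonstration of the same
# calls the tests make:
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")  # the longer entry wins over its "extra_id_1" prefix
assert trie.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]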
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase__ = logging.get_logger(__name__)
def lowerCamelCase__ ( __A :Optional[int] ,__A :Any ,__A :str ):
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def lowerCamelCase__ ( __A :np.ndarray ,__A :Optional[str] ,__A :Optional[str] ):
"""simple docstring"""
__snake_case = to_pil_image(__A )
__snake_case , __snake_case = pil_image.size
__snake_case = pytesseract.image_to_data(__A ,lang=__A ,output_type="""dict""" ,config=__A )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
__snake_case = [idx for idx, word in enumerate(__A ) if not word.strip()]
__snake_case = [word for idx, word in enumerate(__A ) if idx not in irrelevant_indices]
__snake_case = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
__snake_case = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
__snake_case = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
__snake_case = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__snake_case = []
for x, y, w, h in zip(__A ,__A ,__A ,__A ):
__snake_case = [x, y, x + w, y + h]
actual_boxes.append(__A )
# finally, normalize the bounding boxes
__snake_case = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__A ,__A ,__A ) )
assert len(__A ) == len(__A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __snake_case ( snake_case__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["pixel_values"]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_55 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = "" , **_UpperCamelCase , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCamelCase )
__snake_case = size if size is not None else {"""height""": 2_24, """width""": 2_24}
__snake_case = get_size_dict(_UpperCamelCase )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_rescale
__snake_case = rescale_value
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
__snake_case = apply_ocr
__snake_case = ocr_lang
__snake_case = tesseract_config
def a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
"""simple docstring"""
__snake_case = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
__snake_case = (size["""height"""], size["""width"""])
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def a ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ) -> PIL.Image.Image:
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(_UpperCamelCase )
__snake_case = resample if resample is not None else self.resample
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = apply_ocr if apply_ocr is not None else self.apply_ocr
__snake_case = ocr_lang if ocr_lang is not None else self.ocr_lang
__snake_case = tesseract_config if tesseract_config is not None else self.tesseract_config
__snake_case = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(_UpperCamelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
__snake_case = []
__snake_case = []
for image in images:
__snake_case , __snake_case = apply_tesseract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
words_batch.append(_UpperCamelCase )
boxes_batch.append(_UpperCamelCase )
if do_resize:
__snake_case = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
__snake_case = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
__snake_case = BatchFeature(data={"""pixel_values""": images} , tensor_type=_UpperCamelCase )
if apply_ocr:
__snake_case = words_batch
__snake_case = boxes_batch
return data
| 268 | 0 |
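# normalize_box above rescales pixel coordinates onto the 0-1000 grid that
# LayoutLM-style models expect. A quick standalone numeric check (copied
# logic, no transformers import needed):
def _normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


# A 100x200 box on a 400x800 page lands at 0-250 on both normalized axes:
assert _normalize_box([0, 0, 100, 200], width=400, height=800) == [0, 0, 250, 250]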
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case__ = ["note_seq"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ) -> Dict:
requires_backends(self , ["note_seq"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
requires_backends(cls , ["note_seq"] )
@classmethod
def a ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : str ) -> int:
requires_backends(cls , ["note_seq"] )
| 705 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Backtracking: pick one unused element per level, recurse, then undo the choice."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 125 | 0 |
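# A collecting variant of the backtracking above, cross-checked against
# itertools (a sketch; all_permutations is an illustrative name, not part of
# the row above):
import itertools


def all_permutations(sequence: list) -> list:
    result, used, current = [], [False] * len(sequence), []

    def backtrack() -> None:
        if len(current) == len(sequence):
            result.append(current.copy())
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack()
                current.pop()
                used[i] = False

    backtrack()
    return result


assert sorted(all_permutations([3, 1, 2, 4])) == sorted(
    list(p) for p in itertools.permutations([3, 1, 2, 4])
)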
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 60 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 503 | 0 |
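# An offline check of the markdown line format used above (no network access;
# the story dicts here are hypothetical samples):
sample_stories = [
    {"title": "Example story", "url": "https://example.com"},
    {"title": "Another story", "url": "https://example.org"},
]
markdown = "\n".join("* [{title}]({url})".format(**story) for story in sample_stories)
assert markdown.splitlines() == [
    "* [Example story](https://example.com)",
    "* [Another story](https://example.org)",
]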
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=14 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_token_type_ids
__lowercase = use_input_mask
__lowercase = use_labels
__lowercase = use_mc_token_ids
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = self.vocab_size - 1
def snake_case__ ( self ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
if self.use_mc_token_ids:
__lowercase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
__lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case__ ( self ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ):
__lowercase = CTRLModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
model(snake_case_ , token_type_ids=snake_case_ , head_mask=snake_case_ )
model(snake_case_ , token_type_ids=snake_case_ )
__lowercase = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ):
__lowercase = CTRLLMHeadModel(snake_case_ )
model.to(snake_case_ )
model.eval()
__lowercase = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self ):
__lowercase = self.prepare_config_and_inputs()
(
__lowercase
) = config_and_inputs
__lowercase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ):
__lowercase = self.num_labels
__lowercase = CTRLForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class snake_case ( _snake_case ,_snake_case ,_snake_case ,unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__lowerCAmelCase = (CTRLLMHeadModel,) if is_torch_available() else ()
__lowerCAmelCase = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def snake_case__ ( self ):
__lowercase = CTRLModelTester(self )
__lowercase = ConfigTester(self , config_class=snake_case_ , n_embd=37 )
def snake_case__ ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*snake_case_ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = CTRLModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def snake_case__ ( self ):
__lowercase = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(snake_case_ )
__lowercase = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=snake_case_ ) # Legal the president is
__lowercase = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowercase = model.generate(snake_case_ , do_sample=snake_case_ )
self.assertListEqual(output_ids[0].tolist() , snake_case_ )
| 721 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Iteratively solve Ax = b for a strictly diagonally dominant A."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal entry
                elif col == cols - 1:
                    val = table[row][col]  # constant column
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += abs(table[i][j])  # strict dominance is defined on absolute values
        if abs(table[i][i]) <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 576 | 0 |
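# Worked example for jacobi_iteration_method above (assuming the reconstructed
# signature) on the strictly diagonally dominant system
# 4x + y = 1, x + 3y = 2, whose exact solution is x = 1/11, y = 7/11:
import numpy as np

coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
constants = np.array([[1.0], [2.0]])
approx = jacobi_iteration_method(coefficients, constants, init_val=[0.0, 0.0], iterations=25)
assert abs(approx[0] - 1 / 11) < 1e-6 and abs(approx[1] - 7 / 11) < 1e-6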
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''distilbert'''
UpperCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , A=3_0522 , A=512 , A=False , A=6 , A=12 , A=768 , A=4 * 768 , A=0.1 , A=0.1 , A="gelu" , A=0.02 , A=0.1 , A=0.2 , A=0 , **A , ) -> Dict:
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = sinusoidal_pos_embds
_SCREAMING_SNAKE_CASE = n_layers
_SCREAMING_SNAKE_CASE = n_heads
_SCREAMING_SNAKE_CASE = dim
_SCREAMING_SNAKE_CASE = hidden_dim
_SCREAMING_SNAKE_CASE = dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = activation
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = qa_dropout
_SCREAMING_SNAKE_CASE = seq_classif_dropout
super().__init__(**A , pad_token_id=A )
class a_ ( snake_case_ ):
'''simple docstring'''
@property
def snake_case_( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 314 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 314 | 1 |
"""simple docstring"""
def UpperCAmelCase_ ( input_str : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    """Odd-even (brick) transposition sort, in place."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 349 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Searches Google Images for `query` and saves up to `max_images` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 291 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
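

if __name__ == "__main__":
    # Minimal smoke test (added for illustration; assumes the repaired class
    # names above and a working flax/jax install). Shapes are NHWC, matching
    # the jax.image.resize call in FlaxUpsample2D.
    rng = jax.random.PRNGKey(0)
    block = FlaxUpsample2D(out_channels=8)
    x = jnp.zeros((1, 16, 16, 8))
    params = block.init(rng, x)
    y = block.apply(params, x)
    assert y.shape == (1, 32, 32, 8)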
| 291 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 710 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: maps encoder hidden states to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
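

if __name__ == "__main__":
    # Tiny smoke test (added for illustration): the head maps hidden states of
    # size `embed_size` to `class_size` logits.
    import torch

    head = ClassificationHead(class_size=2, embed_size=768)
    logits = head(torch.randn(4, 768))
    assert logits.shape == (4, 2)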
| 134 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 576 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCAmelCase_ :
"""simple docstring"""
pass
| 173 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
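
# Worked example (added for illustration): a weighted triangle with edges
# 0-1 (weight 1), 1-2 (weight 2) and 0-2 (weight 3). Prim's algorithm keeps
# the two cheapest edges, so
#     prisms_algorithm({0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[1, 2], [0, 3]]})
# returns [(0, 1), (1, 2)].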
| 78 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints complete words to stdout."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
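
# Usage sketch (added for illustration; the model/tokenizer setup is assumed,
# not part of the original file):
#
#     from threading import Thread
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=10.0)
#     Thread(target=model.generate, kwargs={**inputs, "streamer": streamer}).start()
#     for new_text in streamer:  # yields decoded chunks as they are produced
#         print(new_text, end="")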
| 70 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Computes u * (u - 1) * ... * (u - p + 1) for the forward-difference expansion."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
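
# Worked example (added for illustration): for x = [0, 2, 4, 6] with
# y-values [1, 4, 16, 64] (i.e. 2**x at the nodes) and value = 3,
# u = (3 - 0) / (2 - 0) = 1.5 and the forward differences are
# 3, 12, 48 / 9, 36 / 27, so
#     f(3) ~= 1 + 1.5*3 + (1.5*0.5/2!)*9 + (1.5*0.5*(-0.5)/3!)*27
#          = 1 + 4.5 + 3.375 - 1.6875 = 7.1875,
# close to the true 2**3 = 8.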
| 581 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 707 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
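
# Quick demo (added for illustration; assumes the repaired CircularLinkedList above):
#     cll = CircularLinkedList()
#     for v in (1, 2, 3):
#         cll.insert_tail(v)
#     print(cll)       # 1->2->3
#     print(len(cll))  # 3 -- iteration stops after one full lap around the ring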
| 552 | 0 |
def solution() -> int:
    """Returns the product of the digits d_1 * d_10 * ... * d_1000000 of the
    fractional part of the Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9_999])
        * int(constant[99_999])
        * int(constant[999_999])
    )
if __name__ == "__main__":
print(solution())
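
# Added note (Project Euler 40): d_1 = 1, d_10 = 1, d_100 = 5, d_1000 = 3,
# d_10000 = 7, d_100000 = 2, d_1000000 = 1, so solution() returns
# 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.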
| 37 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__ = dict(zip(snake_case , range(len(snake_case ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case ) )
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase__ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , max_length=len(snake_case ) , padding=snake_case , return_tensors="pt" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
# Test that special tokens are reset
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , padding=snake_case , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , snake_case )
self.assertIn("attention_mask" , snake_case )
self.assertNotIn("labels" , snake_case )
self.assertNotIn("decoder_attention_mask" , snake_case )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(text_target=snake_case , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=snake_case , truncation=snake_case , return_tensors="pt" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization."]
UpperCamelCase__ = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , text_target=snake_case , return_tensors="pt" )
UpperCamelCase__ = inputs["input_ids"]
UpperCamelCase__ = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase__ = "A, <mask> AllenNLP sentence."
UpperCamelCase__ = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
UpperCamelCase__ = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 551 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def _UpperCamelCase ( self ):
super().setUp()
lowerCamelCase_ : Optional[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[Any] = "UNwant\u00E9d,running"
lowerCamelCase_ : Optional[int] = "unwanted, running"
return input_text, output_text
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Union[str, Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [9, 6, 7, 12, 10, 11] )
def _UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[str] = self.get_rust_tokenizer()
lowerCamelCase_ : int = "UNwant\u00E9d,running"
lowerCamelCase_ : str = tokenizer.tokenize(a_ )
lowerCamelCase_ : List[str] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Optional[Any] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : Dict = self.get_rust_tokenizer()
lowerCamelCase_ : Dict = tokenizer.encode(a_ )
lowerCamelCase_ : str = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
# With lower casing
lowerCamelCase_ : int = self.get_tokenizer(do_lower_case=a_ )
lowerCamelCase_ : Tuple = self.get_rust_tokenizer(do_lower_case=a_ )
lowerCamelCase_ : str = "UNwant\u00E9d,running"
lowerCamelCase_ : List[Any] = tokenizer.tokenize(a_ )
lowerCamelCase_ : Any = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Union[str, Any] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : Tuple = tokenizer.encode(a_ )
lowerCamelCase_ : Tuple = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = BasicTokenizer(do_lower_case=a_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = BasicTokenizer()
lowerCamelCase_ : Dict = "a\n'll !!to?'d of, can't."
lowerCamelCase_ : str = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(a_ ) , a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCamelCase_ : Union[str, Any] = {}
for i, token in enumerate(a_ ):
lowerCamelCase_ : List[str] = i
lowerCamelCase_ : Optional[Any] = WordpieceTokenizer(vocab=a_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _UpperCamelCase ( self ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _UpperCamelCase ( self ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(a_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(a_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
lowerCamelCase_ : str = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(a_ )
lowerCamelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ : str = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : str = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase_ : Dict = tokenizer_r.encode_plus(
a_ , return_attention_mask=a_ , return_token_type_ids=a_ , return_offsets_mapping=a_ , add_special_tokens=a_ , )
lowerCamelCase_ : Any = tokenizer_r.do_lower_case if hasattr(a_ , "do_lower_case" ) else False
lowerCamelCase_ : List[str] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = ["的", "人", "有"]
lowerCamelCase_ : str = "".join(a_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ : Dict = True
lowerCamelCase_ : str = self.tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : Optional[Any] = tokenizer_p.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer_r.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Tuple = tokenizer_r.convert_ids_to_tokens(a_ )
lowerCamelCase_ : int = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : List[Any] = False
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : int = tokenizer_r.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Optional[Any] = tokenizer_p.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : str = tokenizer_r.convert_ids_to_tokens(a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ : Optional[Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(a_ )
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
| 73 |
import re
def dna(dna: str) -> str:
    """Returns the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
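
# Examples for the repaired dna() above (added for illustration):
#     dna("GCTA") -> "CGAT"
#     dna("ATGC") -> "TACG"
#     dna("GTCC") -> "CAGG"; a strand containing other letters raises ValueError.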
| 73 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 229 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = """</w>"""
UpperCAmelCase_ = """@@ """
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """
    Constructs a Speech2Text2Tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
super().__init__(
unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
_A = do_lower_case
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_A = json.load(__lowerCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
_A = None
_A = None
else:
with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
_A = merges_handle.read().split('''\n''' )[:-1]
_A = [tuple(merge.split()[:2] ) for merge in merges]
_A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_A = {}
@property
def snake_case_ ( self : List[str] ) -> int:
return len(self.decoder )
def snake_case_ ( self : Dict ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
_A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__lowerCAmelCase ):
try:
_A = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__lowerCAmelCase )
_A = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
_A = get_pairs(__lowerCAmelCase )
_A = ''' '''.join(__lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
_A = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(__lowerCAmelCase ):
_A = word.replace(__lowerCAmelCase , '''''' )
_A = word.replace(''' ''' , __lowerCAmelCase )
_A = word
return word
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
'''Make sure to provide `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
_A = text.lower()
_A = text.split()
_A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
_A = self.decoder.get(__lowerCAmelCase , self.unk_token )
return result
def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
_A = ''' '''.join(__lowerCAmelCase )
# make sure @@ tokens are concatenated
_A = ''''''.join(string.split(__lowerCAmelCase ) )
return string
def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
_A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_A = token_index
writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 2 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __magic_name__ ( __a : Dict , __a : List[str] , __a : int , __a : Dict , __a : Any , __a : Tuple ):
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
f" reinstalling {pkg}." )
if not ops[op](version.parse(UpperCAmelCase__ ) , version.parse(UpperCAmelCase__ ) ):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def require_version(requirement, hint=None):
    '''simple docstring'''
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}")
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}")
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    '''simple docstring'''
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
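
# Usage sketch (package names below are illustrative; the check raises when a
# requirement is not satisfied, so it is wrapped in try/except here):
try:
    require_version("python>=3.7")
    require_version("numpy>=1.17,<2.0")  # multiple comparators for one package
    require_version_core("tokenizers>=0.11.1")
except (ImportError, importlib.metadata.PackageNotFoundError) as exc:
    print(exc)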
| 713 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
def UpperCAmelCase_ (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) # , add_prefix_space=True)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained("""roberta-base""" )
UpperCamelCase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.encode(
"""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = """Encode this sequence."""
UpperCamelCase__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing spaces after special tokens
UpperCamelCase__ = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )} ) # mask token has a left space
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """Encode <mask> sequence"""
UpperCamelCase__ = """Encode <mask>sequence"""
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
pass
def UpperCAmelCase_ (self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """A, <mask> AllenNLP sentence."""
UpperCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
        # Rust correctly handles the space before the mask, while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def UpperCAmelCase_ (self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(post_processor_state["""trim_offsets"""] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = F"{text_of_1_token} {text_of_1_token}"
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
UpperCamelCase__ = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
| 86 | 0 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
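
# Worked check (Project Euler 71): for 3/7 with denominators up to 1,000,000 the
# closest smaller fraction is 428570/999997, so the call above prints 428570.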
| 148 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Tuple =logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] ={
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
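
# Usage sketch:
# config = RobertaPreLayerNormConfig(hidden_size=256, num_hidden_layers=4)
# onnx_config = RobertaPreLayerNormOnnxConfig(config)  # exposes the ONNX input axes above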
| 148 | 1 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """simple docstring"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """simple docstring"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
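
# Usage sketch (needs an interactive terminal; arrow keys come back with
# ARROW_KEY_FLAG set, so compare against the KEYMAP entries):
# key = get_character()
# if key == chr(KEYMAP["up"]):
#     move_cursor_up()  # hypothetical handler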
| 568 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """simple docstring"""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'{solution() = }')
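
# An a x b grid contains T(a) * T(b) rectangles, where T(n) is the n-th triangle
# number; the search above returns the area a * b whose rectangle count is nearest
# 2,000,000 (the accepted Project Euler 85 answer is an area of 2772).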
| 568 | 1 |
'''simple docstring'''
import math
def prime_sieve(n):
    """simple docstring"""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit=999_966_663_333):
    """simple docstring"""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 11 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root):
    '''simple docstring'''

    def is_valid_tree(node) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node, left_bound, right_bound) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
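
# Usage sketch:
# root = TreeNode(2.0, left=TreeNode(1.0), right=TreeNode(3.0))
# print(is_binary_search_tree(root))  # True
# bad = TreeNode(2.0, left=TreeNode(5.0))
# print(is_binary_search_tree(bad))   # False: 5.0 violates the left-subtree bound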
| 376 | 0 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints):
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
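
# Usage sketch (non-negative integers only, since digits are extracted by place value):
# print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
# -> [2, 24, 45, 66, 75, 90, 170, 802]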
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
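
# Usage sketch: with _LazyModule in place, the heavy torch-backed submodule is only
# imported on first attribute access, e.g.
# from transformers import Swinv2Config
# config = Swinv2Config()  # this access triggers the real configuration_swinv2 import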
| 14 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le"):
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    '''simple docstring'''
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    '''simple docstring'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
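
# Usage sketch (assumes a local "audio.mp3" and a working ffmpeg install):
# with open("audio.mp3", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16_000)
# for chunk in ffmpeg_microphone_live(16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
#     handle(chunk["raw"], chunk["sampling_rate"])  # `handle` is a hypothetical callback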
| 85 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__UpperCamelCase )
A = -1
A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
A = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
A = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A = TextStreamer(__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A = cs.out[:-1]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : int ) -> List[Any]:
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__UpperCamelCase )
A = -1
A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
A = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
A = tokenizer.decode(greedy_ids[0] )
A = TextIteratorStreamer(__UpperCamelCase )
A = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
A = Thread(target=model.generate , kwargs=__UpperCamelCase )
thread.start()
A = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : int ) -> Tuple:
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__UpperCamelCase )
A = -1
A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
A = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
A = greedy_ids[:, input_ids.shape[1] :]
A = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A = TextStreamer(__UpperCamelCase , skip_prompt=__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A = cs.out[:-1]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
A = AutoTokenizer.from_pretrained('distilgpt2' )
A = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(__UpperCamelCase )
A = -1
A = torch.ones((1, 5) , device=__UpperCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A = TextStreamer(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=1 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A = cs.out[:-1] # Remove the final "\n"
A = tokenizer(__UpperCamelCase , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __UpperCamelCase ( self : Any ) -> Dict:
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__UpperCamelCase )
A = -1
A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
A = TextIteratorStreamer(__UpperCamelCase , timeout=0.0_0_1 )
A = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
A = Thread(target=model.generate , kwargs=__UpperCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__UpperCamelCase ):
A = ''
for new_text in streamer:
                streamer_text += new_text
| 106 | 0 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    '''simple docstring'''

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
__lowercase : Optional[int] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
__lowercase : Union[str, Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__lowercase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowercase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowercase : List[Any] = CLIPTextModel(__a )
__lowercase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowercase : Dict = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase ( self : List[Any] , __a : str , __a : List[Any]=0 ) -> Optional[Any]:
"""simple docstring"""
if str(__a ).startswith("""mps""" ):
__lowercase : Dict = torch.manual_seed(__a )
else:
__lowercase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
__lowercase : str = 2
__lowercase : Union[str, Any] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , )
__lowercase : Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__a ) ).to(__a )
__lowercase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase : str = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
__lowercase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    '''simple docstring'''

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
torch.manual_seed(0 )
__lowercase : Optional[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__lowercase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowercase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowercase : Optional[int] = CLIPTextModel(__a )
__lowercase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
__lowercase : List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Optional[int]=0 ) -> Dict:
"""simple docstring"""
if str(__a ).startswith("""mps""" ):
__lowercase : List[str] = torch.manual_seed(__a )
else:
__lowercase : List[Any] = torch.Generator(device=__a ).manual_seed(__a )
__lowercase : List[str] = 2
__lowercase : Optional[Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ),
]
__lowercase : Union[str, Any] = floats_tensor(control_image[0].shape , rng=random.Random(__a ) ).to(__a )
__lowercase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase : List[Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
__lowercase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.get_dummy_components()
__lowercase : str = self.pipeline_class(**__a )
pipe.to(__a )
__lowercase : int = 10.0
__lowercase : Tuple = 4
__lowercase : Tuple = self.get_dummy_inputs(__a )
__lowercase : Tuple = steps
__lowercase : Optional[int] = scale
__lowercase : int = pipe(**__a )[0]
__lowercase : Any = self.get_dummy_inputs(__a )
__lowercase : Union[str, Any] = steps
__lowercase : Union[str, Any] = scale
__lowercase : Optional[Any] = pipe(**__a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
__lowercase : int = self.get_dummy_inputs(__a )
__lowercase : List[Any] = steps
__lowercase : List[Any] = scale
__lowercase : str = pipe(**__a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
__lowercase : Any = self.get_dummy_inputs(__a )
__lowercase : List[str] = steps
__lowercase : int = scale
__lowercase : List[str] = pipe(**__a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : str = self.get_dummy_components()
__lowercase : Dict = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__a )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
__lowercase : List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__a , controlnet=__a )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__a )
__lowercase : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase : str = """evil space-punk bird"""
__lowercase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
__lowercase : Union[str, Any] = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
__lowercase : Dict = pipe(
__a , __a , control_image=__a , generator=__a , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
__lowercase : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
__lowercase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
        assert np.abs(expected_image - image).max() < 9e-2
| 649 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        '''simple docstring'''
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        '''simple docstring'''
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        '''simple docstring'''
        return self.config.num_train_timesteps
| 649 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
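
# Each element is either skipped or included at every branch of the recursion, so
# for [3, 1, 2, 4] the demo above prints all 2**4 = 16 subsequences (including the
# empty one), and 2**3 = 8 for ["A", "B", "C"].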
| 202 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202 | 1 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
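
# Worked examples:
# maximum_non_adjacent_sum([1, 2, 3]) == 4                (pick 1 and 3)
# maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18   (pick 5, 7 and 6)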
| 339 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
def __magic_name__ ( self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowercase = {}
for i, token in enumerate(__UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __magic_name__ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __magic_name__ ( self ):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __magic_name__ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
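# "\xad" is a soft hyphen (Unicode category Cf); BERT-style text cleanup treats
# it as a control character and strips it, so tokenizing it alone yields an
# empty list rather than "[UNK]".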
self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
__lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def __magic_name__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__lowercase = tokenizer_r.encode_plus(
__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , )
__lowercase = tokenizer_r.do_lower_case if hasattr(__UpperCAmelCase , """do_lower_case""" ) else False
__lowercase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = ["""的""", """人""", """有"""]
__lowercase = """""".join(__UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__lowercase = True
__lowercase = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = tokenizer_p.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer_r.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer_r.convert_ids_to_tokens(__UpperCAmelCase )
__lowercase = tokenizer_p.convert_ids_to_tokens(__UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = False
__lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = tokenizer_r.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer_p.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer_r.convert_ids_to_tokens(__UpperCAmelCase )
__lowercase = tokenizer_p.convert_ids_to_tokens(__UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__lowercase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__UpperCAmelCase )
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 339 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
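# When `learnable` is True, the module below stores a trainable embedding table
# that stands in for the "empty prompt" used as the unconditional branch of
# classifier-free guidance; when it is False, the pipeline instead encodes an
# empty string with the text encoder.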
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self :List[str] , __snake_case :bool , __snake_case :Optional[int] = None , __snake_case :Optional[int] = None ):
'''simple docstring'''
super().__init__()
__magic_name__ : int =learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__magic_name__ : Dict =torch.zeros(__snake_case , __snake_case )
else:
__magic_name__ : Tuple =None
__magic_name__ : int =torch.nn.Parameter(__snake_case )
class __A ( UpperCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :Dict , __snake_case :VQModel , __snake_case :CLIPTextModel , __snake_case :CLIPTokenizer , __snake_case :Transformer2DModel , __snake_case :VQDiffusionScheduler , __snake_case :LearnedClassifierFreeSamplingEmbeddings , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=__snake_case , transformer=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
def A__ ( self :Optional[int] , __snake_case :Optional[int] , __snake_case :Optional[Any] , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : List[str] =len(__snake_case ) if isinstance(__snake_case , __snake_case ) else 1
# get prompt text embeddings
__magic_name__ : str =self.tokenizer(
__snake_case , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__magic_name__ : str =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__magic_name__ : int =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__magic_name__ : List[Any] =text_input_ids[:, : self.tokenizer.model_max_length]
__magic_name__ : Union[str, Any] =self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__magic_name__ : Optional[Any] =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate text embeddings for each generation per prompt
__magic_name__ : Any =prompt_embeds.repeat_interleave(__snake_case , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__magic_name__ : Union[str, Any] =self.learned_classifier_free_sampling_embeddings.embeddings
__magic_name__ : Optional[Any] =negative_prompt_embeds.unsqueeze(0 ).repeat(__snake_case , 1 , 1 )
else:
__magic_name__ : Any =[""""""] * batch_size
__magic_name__ : List[str] =text_input_ids.shape[-1]
__magic_name__ : Tuple =self.tokenizer(
__snake_case , padding="""max_length""" , max_length=__snake_case , truncation=__snake_case , return_tensors="""pt""" , )
__magic_name__ : List[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__magic_name__ : Any =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__magic_name__ : Any =negative_prompt_embeds.shape[1]
__magic_name__ : str =negative_prompt_embeds.repeat(1 , __snake_case , 1 )
__magic_name__ : Any =negative_prompt_embeds.view(batch_size * num_images_per_prompt , __snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__ : Optional[int] =torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self :Optional[Any] , __snake_case :Union[str, List[str]] , __snake_case :int = 1_00 , __snake_case :float = 5.0 , __snake_case :float = 1.0 , __snake_case :int = 1 , __snake_case :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case :Optional[torch.FloatTensor] = None , __snake_case :Optional[str] = "pil" , __snake_case :bool = True , __snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __snake_case :int = 1 , ):
'''simple docstring'''
if isinstance(__snake_case , __snake_case ):
__magic_name__ : str =1
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : List[str] =len(__snake_case )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__snake_case )}" )
__magic_name__ : List[str] =batch_size * num_images_per_prompt
__magic_name__ : Dict =guidance_scale > 1.0
__magic_name__ : Union[str, Any] =self._encode_prompt(__snake_case , __snake_case , __snake_case )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__snake_case )}." )
# get the initial completely masked latents unless the user supplied it
__magic_name__ : List[Any] =(batch_size, self.transformer.num_latent_pixels)
if latents is None:
__magic_name__ : List[Any] =self.transformer.num_vector_embeds - 1
__magic_name__ : Union[str, Any] =torch.full(__snake_case , __snake_case ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
__magic_name__ : Dict =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__snake_case , device=self.device )
__magic_name__ : Optional[Any] =self.scheduler.timesteps.to(self.device )
__magic_name__ : List[Any] =latents
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the sample if we are doing classifier free guidance
__magic_name__ : Dict =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
model_output = self.transformer(__snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case ).sample
if do_classifier_free_guidance:
model_output_uncond , model_output_text = model_output.chunk(2 )
model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
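# Guidance is applied to log-probabilities here, so the result is renormalized
# with logsumexp below to keep `model_output` a valid log-distribution over
# the codebook entries.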
model_output -= torch.logsumexp(__snake_case , dim=1 , keepdim=__snake_case )
__magic_name__ : Any =self.truncate(__snake_case , __snake_case )
# remove `log(0)`'s (`-inf`s)
__magic_name__ : Union[str, Any] =model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : Union[str, Any] =self.scheduler.step(__snake_case , timestep=__snake_case , sample=__snake_case , generator=__snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__snake_case , __snake_case , __snake_case )
__magic_name__ : Tuple =self.vqvae.config.vq_embed_dim
__magic_name__ : str =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__magic_name__ : Optional[int] =self.vqvae.quantize.get_codebook_entry(__snake_case , shape=__snake_case )
__magic_name__ : Any =self.vqvae.decode(__snake_case , force_not_quantize=__snake_case ).sample
__magic_name__ : Any =(image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ : Union[str, Any] =self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
def A__ ( self :List[str] , __snake_case :torch.FloatTensor , __snake_case :float ):
'''simple docstring'''
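# Nucleus-style truncation: per latent position, keep only the most probable
# codebook entries whose cumulative probability stays below `truncation_rate`
# (the top entry is always kept) and set every other entry to log(0) = -inf.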
__magic_name__ , __magic_name__ : int =torch.sort(__snake_case , 1 , descending=__snake_case )
__magic_name__ : Any =torch.exp(__snake_case )
__magic_name__ : Dict =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__magic_name__ : Dict =torch.full_like(keep_mask[:, 0:1, :] , __snake_case )
__magic_name__ : str =torch.cat((all_true, keep_mask) , dim=1 )
__magic_name__ : Tuple =keep_mask[:, :-1, :]
__magic_name__ : Optional[int] =keep_mask.gather(1 , indices.argsort(1 ) )
__magic_name__ : Optional[int] =log_p_x_0.clone()
__magic_name__ : Union[str, Any] =-torch.inf # -inf = log(0)
return rv
| 21 |
'''simple docstring'''
import numpy as np
def runge_kutta_4(f, ya: float, xa: float, x_end: float, h: float):
    """Classical fourth-order Runge-Kutta integration of y' = f(x, y) from xa to x_end with step h."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
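# A minimal usage sketch (the names above are reconstructed): integrating
# y' = y from x = 0 to x = 1 with step 0.01 should give y[-1] close to e.
#
# ys = runge_kutta_4(lambda x, y: y, ya=1.0, xa=0.0, x_end=1.0, h=0.01)
# assert abs(ys[-1] - 2.71828) < 1e-3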
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 | 0 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
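# Rough sanity check with textbook values: water has a bulk modulus of about
# 2.2e9 Pa and a density of about 1000 kg/m^3, so
# speed_of_sound_in_a_fluid(1000, 2.2e9) is roughly 1483 m/s.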
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCAmelCase = False
@skip_mps
class lowercase__ ( __a ,__a ,__a ,unittest.TestCase ):
__UpperCAmelCase = StableDiffusionAttendAndExcitePipeline
__UpperCAmelCase = False
__UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
__UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
__UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def UpperCamelCase_ ( cls) -> str:
super().setUpClass()
torch.use_deterministic_algorithms(a_)
@classmethod
def UpperCamelCase_ ( cls) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(a_)
def UpperCamelCase_ ( self) -> int:
torch.manual_seed(0)
_lowerCamelCase : Optional[Any] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
_lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=a_ , set_alpha_to_one=a_ , )
torch.manual_seed(0)
_lowerCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
_lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
_lowerCamelCase : List[Any] = CLIPTextModel(a_)
_lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
_lowerCamelCase : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> Optional[Any]:
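# For the prompt "a cat and a frog", token indices 2 and 5 select the "cat"
# and "frog" tokens; Attend-and-Excite updates the latents for up to
# `max_iter_to_alter` denoising steps until each selected token's
# cross-attention map clears the per-step thresholds.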
if str(a_).startswith("""mps"""):
_lowerCamelCase : List[Any] = torch.manual_seed(a_)
else:
_lowerCamelCase : List[str] = torch.Generator(device=a_).manual_seed(a_)
_lowerCamelCase : Optional[int] = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : Tuple = """cpu"""
_lowerCamelCase : Union[str, Any] = self.get_dummy_components()
_lowerCamelCase : Dict = self.pipeline_class(**a_)
pipe.to(a_)
pipe.set_progress_bar_config(disable=a_)
_lowerCamelCase : int = self.get_dummy_inputs(a_)
_lowerCamelCase : List[Any] = pipe(**a_).images
_lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3))
_lowerCamelCase : Union[str, Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96])
_lowerCamelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a_ , 1e-3)
def UpperCamelCase_ ( self) -> Optional[int]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)
def UpperCamelCase_ ( self) -> Any:
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def UpperCamelCase_ ( self) -> Dict:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4)
def UpperCamelCase_ ( self) -> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def UpperCamelCase_ ( self) -> Union[str, Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)
def UpperCamelCase_ ( self) -> List[Any]:
super().test_save_load_local(expected_max_difference=5e-4)
def UpperCamelCase_ ( self) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
@classmethod
def UpperCamelCase_ ( cls) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(a_)
@classmethod
def UpperCamelCase_ ( cls) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(a_)
def UpperCamelCase_ ( self) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self) -> Union[str, Any]:
_lowerCamelCase : Union[str, Any] = torch.manual_seed(51)
_lowerCamelCase : Union[str, Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=a_ , torch_dtype=torch.floataa)
pipe.to("""cuda""")
_lowerCamelCase : Optional[Any] = """a painting of an elephant with glasses"""
_lowerCamelCase : str = [5, 7]
_lowerCamelCase : int = pipe(
prompt=a_ , token_indices=a_ , guidance_scale=7.5 , generator=a_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
_lowerCamelCase : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""")
assert np.abs((expected_image - image).max()) < 5e-1
| 88 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if ``n`` uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("""123456789""" )


def solution() -> int | None:
    """Return the largest 1-9 pandigital number formed as a concatenated product."""
    for base_num in range(99_99 , 49_99 , -1 ):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
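# Why these constants: for a four-digit n (5000 <= n <= 9999), concatenating
# n with 2n equals n * 100002; for a three-digit n, concatenating n, 2n and 3n
# equals n * 1002003. Scanning n downwards inside each family therefore yields
# the largest pandigital concatenated product of that family first.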
if __name__ == "__main__":
print(F'''{solution() = }''')
| 609 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str=8 ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=lowercase__ , tokenizer=lowercase__ , unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
SCREAMING_SNAKE_CASE_ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
if latents is None:
SCREAMING_SNAKE_CASE_ : Tuple = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
SCREAMING_SNAKE_CASE_ : List[str] = latents.to(lowercase__ )
SCREAMING_SNAKE_CASE_ : int = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = len(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer(
lowercase__ , padding="max_length" , truncation=lowercase__ , max_length=77 , return_attention_mask=lowercase__ , add_special_tokens=lowercase__ , return_tensors="pt" , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text_inputs.input_ids
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase__ , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_input_ids.to(lowercase__ )
SCREAMING_SNAKE_CASE_ : Tuple = text_inputs.attention_mask.to(lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.text_encoder(
input_ids=lowercase__ , attention_mask=lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prompt_embeds.repeat_interleave(lowercase__ , dim=0 )
SCREAMING_SNAKE_CASE_ : Tuple = text_encoder_hidden_states.repeat_interleave(lowercase__ , dim=0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = text_mask.repeat_interleave(lowercase__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE_ : int = [""] * batch_size
elif type(lowercase__ ) is not type(lowercase__ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase__ )} !="
F" {type(lowercase__ )}." )
elif isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = [negative_prompt]
elif batch_size != len(lowercase__ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase__ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = negative_prompt
SCREAMING_SNAKE_CASE_ : str = self.tokenizer(
lowercase__ , padding="max_length" , max_length=77 , truncation=lowercase__ , return_attention_mask=lowercase__ , add_special_tokens=lowercase__ , return_tensors="pt" , )
SCREAMING_SNAKE_CASE_ : List[Any] = uncond_input.input_ids.to(lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = uncond_input.attention_mask.to(lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.text_encoder(
input_ids=lowercase__ , attention_mask=lowercase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ : List[Any] = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE_ : Dict = negative_prompt_embeds.repeat(1 , lowercase__ )
SCREAMING_SNAKE_CASE_ : Tuple = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = uncond_text_encoder_hidden_states.shape[1]
SCREAMING_SNAKE_CASE_ : Tuple = uncond_text_encoder_hidden_states.repeat(1 , lowercase__ , 1 )
SCREAMING_SNAKE_CASE_ : str = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowercase__ , -1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = uncond_text_mask.repeat_interleave(lowercase__ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , lowercase__=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.device(F"cuda:{gpu_id}" )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__ )
def __lowerCamelCase ( self , lowercase__=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
SCREAMING_SNAKE_CASE_ : List[str] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowercase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
SCREAMING_SNAKE_CASE_ : List[Any] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__ )
if self.safety_checker is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = cpu_offload_with_hook(self.safety_checker , lowercase__ , prev_module_hook=lowercase__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE_ : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__ )
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = 512 , lowercase__ = 512 , lowercase__ = 100 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
"""simple docstring"""
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 1
elif isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : List[str] = len(lowercase__ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase__ )}" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._execution_device
SCREAMING_SNAKE_CASE_ : Dict = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE_ : Optional[int] = guidance_scale > 1.0
SCREAMING_SNAKE_CASE_ : str = self._encode_prompt(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : int = torch.cat(lowercase__ , dim=0 )
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = torch.cat(lowercase__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_embeds.repeat_interleave(lowercase__ , dim=0 )
SCREAMING_SNAKE_CASE_ : List[str] = negative_image_embeds.repeat_interleave(lowercase__ , dim=0 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowercase__ )
self.scheduler.set_timesteps(lowercase__ , device=lowercase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler.timesteps
SCREAMING_SNAKE_CASE_ : Tuple = self.unet.config.in_channels
SCREAMING_SNAKE_CASE_ : Dict = get_new_h_w(lowercase__ , lowercase__ , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ : List[Any] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
SCREAMING_SNAKE_CASE_ : Dict = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ : int = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ : str = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , ).prev_sample
# post-processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.movq.decode(lowercase__ , force_not_quantize=lowercase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ : int = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ : Dict = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
| 700 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.nn.Linear(2 , 4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
SCREAMING_SNAKE_CASE_ : Any = torch.optim.lr_scheduler.OneCycleLR(SCREAMING_SNAKE_CASE_ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
SCREAMING_SNAKE_CASE_ : Dict = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
SCREAMING_SNAKE_CASE_ : Tuple = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
@require_cuda
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowercase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator(cpu=lowercase__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Accelerator()
SCREAMING_SNAKE_CASE_ : Any = GradientState()
assert state.num_steps == 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
SCREAMING_SNAKE_CASE_ : Optional[int] = False
assert state.sync_gradients is False
GradientState._reset_state()
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = create_components()
prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl = accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = create_components()
accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __lowerCamelCase ( self ):
"""simple docstring"""
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowercase__ , **lowercase__ ):
pass
with patch("torch.cuda.set_device" , lowercase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = create_components()
accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = get_signature(lowercase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase__ )
# make sure random weights don't match
load_random_weights(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = create_components()
accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_signature(lowercase__ )
# saving hook
def save_config(lowercase__ , lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"class_name": models[0].__class__.__name__}
with open(os.path.join(lowercase__ , "data.json" ) , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
# loading hook
def load_config(lowercase__ , lowercase__ ):
with open(os.path.join(lowercase__ , "data.json" ) , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Any = json.load(lowercase__ )
SCREAMING_SNAKE_CASE_ : List[str] = config["class_name"]
SCREAMING_SNAKE_CASE_ : Dict = accelerator.register_save_state_pre_hook(lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = accelerator.register_load_state_pre_hook(lowercase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase__ )
# make sure random weights don't match with hooks
load_random_weights(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "random"
# make sure loaded weights match with hooks
accelerator.load_state(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase__ )
# make sure random weights don't match with hooks removed
load_random_weights(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
SCREAMING_SNAKE_CASE_ : Tuple = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = create_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
# This should work
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertTrue(dummy_obj is None )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = create_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 2, 3]
# This should work
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def __lowerCamelCase ( self ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map={"": 0} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
# This should work
SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.prepare(lowercase__ )
    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that the accelerator errors out for a bnb model partially offloaded to CPU."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        # NOTE: offloading the LM head is an assumption; the original only shows
        # that some module is mapped to "cpu".
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get a value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that the accelerator errors out for a bnb model spread across several GPUs."""
        from transformers import AutoModelForCausalLM

        # NOTE: forcing the shared state is an assumption; the original only shows
        # a dict with the MULTI_GPU distributed type being assigned somewhere.
        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get a value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Tests that the accelerator can handle a bnb model spread across several GPUs."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE : int = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
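# Usage sketch (names resolve lazily on first import; the checkpoint id below is illustrative):
#   from transformers import AltCLIPModel, AltCLIPProcessor
#   model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")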
"""simple docstring"""
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = val
_lowerCAmelCase = None
_lowerCAmelCase = None
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
_lowerCAmelCase = Node(lowerCamelCase )
else:
self.left.insert(lowerCamelCase )
elif val > self.val:
if self.right is None:
_lowerCAmelCase = Node(lowerCamelCase )
else:
self.right.insert(lowerCamelCase )
else:
_lowerCAmelCase = val
def __UpperCAmelCase ( snake_case_ : List[str] , snake_case_ : Any ) -> Dict:
"""simple docstring"""
if root:
inorder(root.left , snake_case_ )
res.append(root.val )
inorder(root.right , snake_case_ )
def __UpperCAmelCase ( snake_case_ : str ) -> List[Any]:
"""simple docstring"""
if len(snake_case_ ) == 0:
return arr
_lowerCAmelCase = Node(arr[0] )
for i in range(1 , len(snake_case_ ) ):
root.insert(arr[i] )
# Traverse BST in order.
_lowerCAmelCase = []
inorder(snake_case_ , snake_case_ )
return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3])) | 156 | 1 |
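# Illustrative run: the call above prints [1, 2, 3, 9, 10, 13, 14]. Building the
# (unbalanced) BST averages O(n log n) overall, degrading to O(n^2) when the
# input is already sorted.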
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config
@pytest.fixture(scope="session" )
def snake_case_ ( ) -> Tuple:
lowercase__ : Union[str, Any] = 10
lowercase__ : Dict = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
lowercase__ : Any = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(SCREAMING_SNAKE_CASE_ ) ),
} ,features=SCREAMING_SNAKE_CASE_ ,)
return dataset
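# Usage sketch (hypothetical test; pytest injects session fixtures by name):
#
#   def test_dataset_shape(dataset):
#       assert dataset.num_rows == 10
#       assert "tokens" in dataset.features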
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowercase__ : int = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE_ )
return filename
# FILE_CONTENT + files
__a : Tuple = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowercase__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt"
lowercase__ : int = FILE_CONTENT
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return filename
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
import bza
lowercase__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
lowercase__ : Dict = bytes(SCREAMING_SNAKE_CASE_ ,"utf-8" )
with bza.open(SCREAMING_SNAKE_CASE_ ,"wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
import gzip
lowercase__ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
lowercase__ : Tuple = bytes(SCREAMING_SNAKE_CASE_ ,"utf-8" )
with gzip.open(SCREAMING_SNAKE_CASE_ ,"wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowercase__ : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
lowercase__ : Any = bytes(SCREAMING_SNAKE_CASE_ ,"utf-8" )
with lza.frame.open(SCREAMING_SNAKE_CASE_ ,"wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Tuple:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowercase__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as archive:
archive.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
import tarfile
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.add(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
import lzma
lowercase__ : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
lowercase__ : int = bytes(SCREAMING_SNAKE_CASE_ ,"utf-8" )
with lzma.open(SCREAMING_SNAKE_CASE_ ,"wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
import zipfile
lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowercase__ : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
lowercase__ : Dict = bytes(SCREAMING_SNAKE_CASE_ ,"utf-8" )
with zstd.open(SCREAMING_SNAKE_CASE_ ,"wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.xml"
lowercase__ : List[str] = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowercase__ : List[str] = datasets.Dataset.from_dict(SCREAMING_SNAKE_CASE_ )
lowercase__ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
lowercase__ : str = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE_ ) ) as con:
lowercase__ : Dict = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" ,tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
lowercase__ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ,newline="" ) as f:
lowercase__ : Optional[int] = csv.DictWriter(SCREAMING_SNAKE_CASE_ ,fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowercase__ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ,newline="" ) as f:
lowercase__ : Optional[int] = csv.DictWriter(SCREAMING_SNAKE_CASE_ ,fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Any:
import bza
lowercase__ : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(SCREAMING_SNAKE_CASE_ ,"rb" ) as f:
lowercase__ : Union[str, Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(SCREAMING_SNAKE_CASE_ ,"wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> int:
lowercase__ : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> int:
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(csv_path.replace(".csv" ,".CSV" ) ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(csva_path.replace(".csv" ,".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowercase__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join("main_dir" ,os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join("main_dir" ,os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowercase__ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
lowercase__ : List[Any] = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(SCREAMING_SNAKE_CASE_ ,"wb" ) as f:
lowercase__ : Union[str, Any] = pq.ParquetWriter(SCREAMING_SNAKE_CASE_ ,schema=SCREAMING_SNAKE_CASE_ )
lowercase__ : List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(SCREAMING_SNAKE_CASE_ ) )] for k in DATA[0]} ,schema=SCREAMING_SNAKE_CASE_ )
writer.write_table(SCREAMING_SNAKE_CASE_ )
writer.close()
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowercase__ : Any = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
lowercase__ : int = {"data": DATA}
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowercase__ : str = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
lowercase__ : Optional[Any] = {"data": DATA_DICT_OF_LISTS}
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowercase__ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowercase__ : str = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowercase__ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
for item in DATA_312:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowercase__ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
for item in DATA_STR:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Any:
import gzip
lowercase__ : int = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(SCREAMING_SNAKE_CASE_ ,"rb" ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE_ ,"wb" ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import gzip
lowercase__ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(SCREAMING_SNAKE_CASE_ ,"rb" ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE_ ,"wb" ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> int:
lowercase__ : Any = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowercase__ : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join("nested" ,os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowercase__ : str = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join("main_dir" ,os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join("main_dir" ,os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowercase__ : str = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.add(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.add(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowercase__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.add(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join("nested" ,os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
lowercase__ : List[str] = ["0", "1", "2", "3"]
lowercase__ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowercase__ : Union[str, Any] = ["0", "1", "2", "3"]
lowercase__ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowercase__ : List[str] = ["0", "1", "2", "3"]
lowercase__ : str = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowercase__ : Any = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Dict:
lowercase__ : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join("main_dir" ,os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join("main_dir" ,os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Dict:
lowercase__ : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename("unsupported.ext" ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowercase__ : Any = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
lowercase__ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(SCREAMING_SNAKE_CASE_ ,"w" ,encoding="utf-8" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( ) -> List[Any]:
return os.path.join("tests" ,"features" ,"data" ,"test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def snake_case_ ( ) -> Tuple:
return os.path.join("tests" ,"features" ,"data" ,"test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowercase__ : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.write(SCREAMING_SNAKE_CASE_ ,arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ).replace(".jpg" ,"2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowercase__ : Tuple = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" ,"w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" ,"w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
return data_dir | 298 |
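# Usage sketch (hypothetical test): data loaders that scan this directory are
# expected to pick up only subdir/train.txt and subdir/test.txt, skipping the
# dot-prefixed file and the hidden .subdir directory.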
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
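# For reference, shift_tokens_right builds teacher-forcing decoder inputs by
# prepending decoder_start_token_id and dropping the final label token, e.g.
# (illustrative token ids, assuming decoder_start_token_id == 0):
#   labels            = [[15339,   132,     1]]
#   decoder_input_ids = [[    0, 15339,   132]]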
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
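# Illustrative result (values taken from CONFIG_MAP above; attribute names as
# set in the function): get_efficientnet_config("b0") yields image_size=224,
# dropout_rate=0.2 and num_labels=1000 with the imagenet-1k id2label mapping.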
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # NOTE: the classifier source keys below are an assumption (Keras top-layer
    # names); the original only shows the target names.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
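# Example of one produced mapping (illustrative, assuming TF block "1a" is the
# first block and therefore mapped to index "0"):
#   "block1a_project_conv/kernel:0" -> "efficientnet.encoder.blocks.0.projection.project_conv.weight"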
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
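# Example invocation (hypothetical script file name and paths):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model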
from __future__ import annotations
import math
# for calculating the u value
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
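# The loop above evaluates Newton's forward-difference formula
#     f(x) ~ y0 + u*D(y0) + u*(u-1)/2! * D^2(y0) + ...,  u = (x - x0) / h,
# where D^k(y0) is the k-th forward difference built in the table and ucal()
# accumulates the u*(u-1)*...*(u-k+1) product.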
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the cheapest top-left to bottom-right path cost, moving only right or down.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
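# Note: min_path_sum mutates `matrix` in place and runs in O(rows * cols) time
# with O(1) extra space; pass a deep copy if the original grid must be kept.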
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
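# Example invocation (hypothetical script name and output path):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations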
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    """Disable gradients for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
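# Usage sketch (assumes `net` is any torch.nn.Module instance):
#   freeze_params(net)      # every parameter now has requires_grad == False
#   device = get_device()   # "cuda" if available, possibly "mps", else "cpu"
#   print(get_timestamp())  # e.g. "14:03:59"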
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
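# Usage sketch (hypothetical test; load_dataset accepts the script directory):
#
#   def test_dummy_dataset_script(dataset_loading_script_dir):
#       import datasets
#       ds = datasets.load_dataset(dataset_loading_script_dir)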
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
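# Usage sketch (requires sentencepiece; the checkpoint id below is illustrative):
#   from transformers import GPTSw3Tokenizer
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")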
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate the square root of `a` with the Newton-Raphson method.

    >>> all(abs(square_root_iterative(i) - math.sqrt(i)) <= 1e-10 for i in range(500))
    True
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
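# Usage sketch:
#   square_root_iterative(4)    # -> 2.0 (within tolerance)
#   square_root_iterative(3.2)  # -> ~1.7888543819998317
#   square_root_iterative(-1)   # raises ValueError("math domain error")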
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
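# e.g. to_2tuple(224) -> (224, 224); an iterable such as (224, 196) is returned unchanged.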
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(_SCREAMING_SNAKE_CASE, unittest.TestCase):  # base mixin is defined earlier in this file
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
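# Illustrative helper (an addition, not part of the original test suite): the attention
# shape check above relies on DeiT exposing num_patches + 2 tokens, the extra two being
# the [CLS] and distillation tokens. The dimensions below are assumed example values,
# only meant to demonstrate the arithmetic.
def _deit_seq_len(image_size=(30, 30), patch_size=(2, 2)):
    num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    return num_patches + 2  # e.g. 15 * 15 + 2 == 227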
| 209 | '''simple docstring'''
import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
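# A minimal usage sketch (an addition, not part of the test class): CamemBERT wraps a
# single sequence as `<s> X </s>` and a pair as `<s> A </s></s> B </s>`, which is the
# behaviour the mixin's special-token tests exercise. The token ids passed in are
# arbitrary placeholders.
def _special_token_layout():
    tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
    single = tokenizer.build_inputs_with_special_tokens([10, 11])
    pair = tokenizer.build_inputs_with_special_tokens([10, 11], [12])
    return single, pair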
| 209 | 1 |
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
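# Illustrative sketch (an addition, not part of the test suite): `prepare_opt_inputs_dict`
# derives the attention mask from the pad token when none is supplied. The ids below are
# arbitrary example values.
def _mask_from_padding_example():
    config = OPTConfig(vocab_size=99, pad_token_id=1)
    input_ids = tf.constant([[5, 6, 7, 1, 1]])
    inputs = prepare_opt_inputs_dict(config, input_ids)
    return inputs["attention_mask"]  # [[1, 1, 1, 0, 0]]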
| 508 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to decode raw audio bytes with ffmpeg into a float32 mono
    waveform at `sampling_rate`.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone via ffmpeg, yielding raw byte
    chunks of roughly `chunk_length_s` seconds.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Like `ffmpeg_microphone`, but yields overlapping numpy chunks (with left/right
    strides) so a downstream model can smooth predictions at chunk boundaries.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from `iterator` and regroups them into chunks of `chunk_len` bytes
    that overlap by `stride` (left, right) bytes. If `stream` is True, partial chunks
    are emitted as soon as data arrives, flagged with `"partial": True`.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
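# Self-contained demonstration (an addition, not used by the library itself): with
# chunk_len=8 and stride=(2, 2), consecutive chunks overlap by stride_left +
# stride_right bytes, and the trailing remainder is emitted as a final short chunk.
def _chunk_bytes_iter_demo():
    payload = bytes(range(20))
    for item in chunk_bytes_iter(iter([payload]), chunk_len=8, stride=(2, 2)):
        print(item["stride"], list(item["raw"]))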
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
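def _microphone_transcription_demo():
    """Hedged usage sketch (an addition, not called anywhere): stream microphone audio
    into an ASR pipeline. Requires a working ffmpeg install; the checkpoint name is an
    illustrative assumption, not something this module mandates."""
    from transformers import pipeline

    asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
    microphone = ffmpeg_microphone_live(sampling_rate=16_000, chunk_length_s=5.0, stream_chunk_s=1.0)
    for result in asr(microphone):
        print(result["text"])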
| 508 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 585 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects keyed on `val`, with an index map so that
    `decrease_key` can locate a node in O(1).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
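# Illustrative helper (an addition): repeatedly removing the root drains the heap in
# ascending order of `val`, which is the classic heapsort use of a min-heap.
def drain_in_order(heap):
    ordered = []
    while not heap.is_empty():
        ordered.append(heap.remove())
    return ordered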
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 585 | 1 |
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
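# A minimal sketch (an addition, mirroring the integration test above): converting a
# PIL hint image into the (1, 3, H, W) float tensor in [0, 1] that the controlnet
# pipeline consumes.
def _prepare_hint(image):
    hint = torch.from_numpy(np.array(image)).float() / 255.0
    return hint.permute(2, 0, 1).unsqueeze(0)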
| 209 |
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 209 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
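def _tiny_pegasus_config_example():
    """Illustrative sketch (an addition, not part of the library): a scaled-down
    configuration. The small dimensions are assumptions for demonstration, not the
    values of any released checkpoint."""
    config = PegasusConfig(
        d_model=64,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
    )
    # attribute_map and the properties above alias these names:
    assert config.hidden_size == 64 and config.num_attention_heads == 4
    return config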
| 335 |
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 433 | 0 |
"""Convert ViLT checkpoints from the original GitHub repository."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
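# Illustrative note (an addition): the fused qkv matrix popped above has shape
# (3 * hidden_size, hidden_size), and the three equal slices are the query, key and
# value projections. hidden_size=768 is an assumed example value.
def _qkv_split_shapes(hidden_size=768):
    in_proj_weight = torch.zeros(3 * hidden_size, hidden_size)
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v = in_proj_weight[-hidden_size:, :]
    return q.shape, k.shape, v.shape  # three (768, 768) slices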
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
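# Small demonstration (an addition, with hypothetical keys): `rename_key` moves a
# tensor to a new name in place, which is how every (src, dest) pair produced by
# `create_rename_keys` gets applied to the checkpoint.
def _rename_key_demo():
    state_dict = {"transformer.norm.weight": torch.ones(3)}
    rename_key(state_dict, "transformer.norm.weight", "vilt.layernorm.weight")
    return sorted(state_dict)  # ['vilt.layernorm.weight']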
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ViLT weights into the HF ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
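
# Example invocation (script name and output path illustrative):
#   python convert_vilt_original_to_pytorch_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm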
| 710 |
"""Dataset wrapper around tokenized sequences, used by the distillation training scripts."""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around token-id sequences; runs cleaning steps once at construction time."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
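    # Worked example (made-up numbers): with max_len = 5, a 9-token sequence is
    # cut into chunks of at most max_len - 2 = 3 payload tokens, and each chunk
    # is re-wrapped with cls/sep, so every sub-sequence stays <= max_len.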
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
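

# --- Illustrative sketch (standalone, made-up token ids): the right-padding
# performed by batch_sequences, padding every sequence to the batch maximum.
def _demo_pad_batch():
    import torch

    pad_idx = 0  # hypothetical pad token id
    seqs = [[5, 6, 7], [8, 9]]
    max_seq_len = max(len(s) for s in seqs)
    padded = [s + [pad_idx] * (max_seq_len - len(s)) for s in seqs]
    return torch.tensor(padded)  # tensor([[5, 6, 7], [8, 9, 0]])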
| 0 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Resolve the local cache path of a given Bark checkpoint."""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download a checkpoint file from the Hub into the local Bark cache."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
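

# --- Illustrative sketch (standalone): the key fixup performed above, i.e.
# stripping the torch.compile "_orig_mod." prefix and mapping original
# nanoGPT-style names to their HF counterparts. The mapping below is a made-up
# subset of `new_layer_name_dict`, for demonstration only.
def _demo_rename_state_dict_key():
    mapping = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
    key = "_orig_mod.transformer.h.0.attn.c_attn.weight"
    new_key = key[len("_orig_mod.") :]
    for old, new in mapping.items():
        new_key = new_key.replace(old, new)
    return new_key  # -> "layers.0.attn.att_proj.weight"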
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]

    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
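
# Example invocation (script name illustrative):
#   python convert_suno_to_hf.py text ./bark-text --is_small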
| 55 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
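        # Worked example with the defaults above: (4 // 2) ** 2 + 1 = 5 visual
        # tokens, plus 7 text tokens, gives a total sequence length of 12.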
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
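

# --- Illustrative sketch (standalone): a vectorized version of the bbox fixup
# above, sorting each box's coordinates so that (x0, y0) <= (x1, y1).
def _demo_fix_bbox():
    import torch

    bbox = torch.tensor([[7, 2, 3, 9]])  # x0 > x1: invalid
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)  # tensor([[3, 2, 7, 9]])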
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 55 | 1 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two given electrical
    properties, returning the missing one in a dict. Exactly one argument must be 0.
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
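
# Example (values illustrative): a 35 mH inductor at 1 kHz
#   ind_reactance(35e-3, 1e3, 0)  ->  {'reactance': 219.91...}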
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on the module-level DAG above."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
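    # prints ['c', 'd', 'e', 'b', 'a'] for the graph above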
| 215 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
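

# --- Illustrative sketch (made-up labels): the handler above turns one premise
# and N candidate labels into N (premise, hypothesis) pairs for the NLI model.
def _demo_sequence_pairs():
    template = "This example is {}."
    sequence = "Who are you voting for in 2020?"
    labels = ["politics", "economics"]
    return [[sequence, template.format(label)] for label in labels]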
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline using a `ModelForSequenceClassification` trained on NLI (natural
    language inference) tasks.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """
        Parse arguments and tokenize only_first so that the hypothesis (label) is not truncated.
        """
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
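
# Example usage (illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification")
#   classifier("I have a problem with my iphone that needs to be resolved asap!",
#              candidate_labels=["urgent", "not urgent"], multi_label=True)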
| 362 | """simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of vertices reachable from `start`."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
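    # prints the set of vertices reachable from "A" (here, all seven: A-G)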
| 359 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 515 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> f1_metric = datasets.load_metric("f1")\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 515 | 1 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 618 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
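        # Worked example with the defaults above: (30 // 2) ** 2 = 225 patches,
        # plus the [CLS] token, gives a sequence length of 226.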
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 618 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally shows the bar only on the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar everywhere except on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 716 |
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
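# A classic Python quine: `quine % quine` interpolates the format string with
# its own source (%% -> %, %r -> the quoted string), so the program prints
# exactly its own text.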
| 380 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self) -> float:
        return 1e-4
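
# Example (illustrative): the defaults above reproduce the ViT-base
# hyper-parameters; individual fields can be overridden, e.g.
# ViTConfig(image_size=384) changes only the input resolution.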
| 615 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
def __UpperCAmelCase ( self : Any, UpperCamelCase__ : Any, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[Any]=False ) -> Optional[Any]:
with torch.no_grad():
_A = MaskFormerModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_A = model(pixel_values=UpperCamelCase__, pixel_mask=UpperCamelCase__ )
_A = model(UpperCamelCase__, output_hidden_states=UpperCamelCase__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : int, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Tuple ) -> str:
_A = MaskFormerForInstanceSegmentation(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
def comm_check_on_output(UpperCamelCase__ : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_A = model(pixel_values=UpperCamelCase__, pixel_mask=UpperCamelCase__ )
_A = model(UpperCamelCase__ )
comm_check_on_output(UpperCamelCase__ )
_A = model(
pixel_values=UpperCamelCase__, pixel_mask=UpperCamelCase__, mask_labels=UpperCamelCase__, class_labels=UpperCamelCase__ )
comm_check_on_output(UpperCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class lowercase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__lowerCAmelCase = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
_A = MaskFormerModelTester(self )
_A = ConfigTester(self, config_class=UpperCamelCase__, has_text_modality=UpperCamelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> int:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict ) -> int:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase__, **UpperCamelCase__, output_hidden_states=UpperCamelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase__ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(UpperCamelCase__ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['pixel_values']
self.assertListEqual(arg_names[:1], UpperCamelCase__ )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_A = MaskFormerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
_A = (self.model_tester.min_size,) * 2
_A = {
'pixel_values': torch.randn((2, 3, *size), device=UpperCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size), device=UpperCamelCase__ ),
'class_labels': torch.zeros(2, 10, device=UpperCamelCase__ ).long(),
}
_A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase__ )
_A = model(**UpperCamelCase__ )
self.assertTrue(outputs.loss is not None )
def __UpperCAmelCase ( self : Dict ) -> Dict:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase__, **UpperCamelCase__, output_hidden_states=UpperCamelCase__ )
def __UpperCAmelCase ( self : int ) -> Tuple:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(UpperCamelCase__ ).to(UpperCamelCase__ )
_A = model(**UpperCamelCase__, output_attentions=UpperCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A , _A , _A , _A , _A = self.model_tester.prepare_config_and_inputs()
_A = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
_A = model(UpperCamelCase__, mask_labels=UpperCamelCase__, class_labels=UpperCamelCase__ ).loss
loss.backward()
def __UpperCAmelCase ( self : Dict ) -> List[str]:
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A , _A , _A , _A , _A = self.model_tester.prepare_config_and_inputs()
_A = True
_A = True
_A = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
_A = model(UpperCamelCase__, mask_labels=UpperCamelCase__, class_labels=UpperCamelCase__ )
_A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_UpperCAmelCase : Any = 1E-4
def _SCREAMING_SNAKE_CASE ( ):
_A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : str ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def __UpperCAmelCase ( self : Any ) -> List[str]:
_A = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(UpperCamelCase__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(UpperCamelCase__, return_tensors='pt' ).to(UpperCamelCase__ )
_A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase__, (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**UpperCamelCase__ )
_A = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], UpperCamelCase__, atol=UpperCamelCase__ ) )
_A = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], UpperCamelCase__, atol=UpperCamelCase__ ) )
_A = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(UpperCamelCase__ )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(UpperCamelCase__, return_tensors='pt' ).to(UpperCamelCase__ )
_A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase__, (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**UpperCamelCase__ )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
_A = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_A = torch.tensor(UpperCamelCase__ ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], UpperCamelCase__, atol=UpperCamelCase__ ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], UpperCamelCase__, atol=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(UpperCamelCase__ )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(UpperCamelCase__, return_tensors='pt' ).to(UpperCamelCase__ )
_A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase__, (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**UpperCamelCase__ )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
_A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_A = torch.tensor(UpperCamelCase__ ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], UpperCamelCase__, atol=UpperCamelCase__ ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], UpperCamelCase__, atol=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : Dict ) -> int:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(UpperCamelCase__ )
.eval()
)
_A = self.default_image_processor
_A = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )], segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )], return_tensors='pt', )
_A = inputs['pixel_values'].to(UpperCamelCase__ )
_A = [el.to(UpperCamelCase__ ) for el in inputs['mask_labels']]
_A = [el.to(UpperCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
_A = model(**UpperCamelCase__ )
self.assertTrue(outputs.loss is not None )
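# Hedged inference sketch distilled from the integration tests above (the
# checkpoint name and output shapes come from those tests; `image` stands for
# any RGB PIL image):
#     processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#     model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # masks_queries_logits: (1, num_queries, height // 4, width // 4)
#     # class_queries_logits: (1, num_queries, num_labels + 1)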
| 107 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , snake_case) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =parent
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return {}
def lowerCamelCase__ ( ):
'''simple docstring'''
_UpperCAmelCase : Any ='<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
_UpperCAmelCase : Optional[Any] ='\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =MarkupLMFeatureExtractor if is_bsa_available() else None
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple =MarkupLMFeatureExtractionTester(self)
@property
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
return self.feature_extract_tester.prepare_feat_extract_dict()
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =self.feature_extraction_class()
# Test not batched input
_UpperCAmelCase : Union[str, Any] =get_html_strings()[0]
_UpperCAmelCase : str =feature_extractor(snake_case)
# fmt: off
_UpperCAmelCase : Optional[Any] =[['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
_UpperCAmelCase : str =[['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , snake_case)
self.assertEqual(encoding.xpaths , snake_case)
# Test batched
_UpperCAmelCase : Dict =get_html_strings()
_UpperCAmelCase : List[Any] =feature_extractor(snake_case)
# fmt: off
_UpperCAmelCase : str =expected_nodes + [['My First Heading', 'My first paragraph.']]
_UpperCAmelCase : Union[str, Any] =expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes) , 2)
self.assertEqual(len(encoding.xpaths) , 2)
self.assertEqual(encoding.nodes , snake_case)
self.assertEqual(encoding.xpaths , snake_case)
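# Hedged usage sketch for the feature extractor exercised above (requires bs4;
# the tiny HTML string and the expected outputs follow the same pattern as the test):
#     extractor = MarkupLMFeatureExtractor()
#     encoding = extractor("<html><body><h1>Hi</h1></body></html>")
#     # encoding.nodes  -> [["Hi"]]
#     # encoding.xpaths -> [["/html/body/h1"]]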
| 701 |
'''simple docstring'''
def solution( __lowerCamelCase : int = 1_0_0 ):
'''simple docstring'''
_UpperCAmelCase : int =set()
_UpperCAmelCase : Union[str, Any] =0
_UpperCAmelCase : Optional[Any] =n + 1 # maximum limit
for a in range(2 , __lowerCamelCase ):
for b in range(2 , __lowerCamelCase ):
_UpperCAmelCase : Tuple =a**b # calculates the current power
collect_powers.add(__lowerCamelCase ) # adds the result to the set
return len(__lowerCamelCase )
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
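# Hedged sanity checks for `solution` above: for n = 5 the distinct values of
# a**b with 2 <= a, b <= 5 number 15 (2**4 and 4**2 collide), and the n = 100
# count of 9183 is the published Project Euler 29 answer.
#     solution(5)    # -> 15
#     solution(100)  # -> 9183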
| 331 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( a__ ):
lowerCAmelCase :Tuple = '''decision_transformer'''
lowerCAmelCase :Union[str, Any] = ['''past_key_values''']
lowerCAmelCase :Optional[Any] = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
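    # Hedged usage note: upstream this class is DecisionTransformerConfig and is
    # consumed by DecisionTransformerModel (model class assumed, not shown here):
    #     config = DecisionTransformerConfig(state_dim=17, act_dim=4)
    #     model = DecisionTransformerModel(config)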
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4096 , action_tanh=True , vocab_size=1 , n_positions=1024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
UpperCAmelCase__ : Optional[Any] = state_dim
UpperCAmelCase__ : Any = act_dim
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : List[Any] = max_ep_len
UpperCAmelCase__ : Any = action_tanh
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : int = n_positions
UpperCAmelCase__ : Optional[Any] = n_layer
UpperCAmelCase__ : Optional[int] = n_head
UpperCAmelCase__ : Dict = n_inner
UpperCAmelCase__ : List[Any] = activation_function
UpperCAmelCase__ : int = resid_pdrop
UpperCAmelCase__ : str = embd_pdrop
UpperCAmelCase__ : List[Any] = attn_pdrop
UpperCAmelCase__ : Optional[int] = layer_norm_epsilon
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : Union[str, Any] = scale_attn_weights
UpperCAmelCase__ : str = use_cache
UpperCAmelCase__ : Tuple = scale_attn_by_inverse_layer_idx
UpperCAmelCase__ : Optional[int] = reorder_and_upcast_attn
UpperCAmelCase__ : Optional[int] = bos_token_id
UpperCAmelCase__ : Dict = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs) | 407 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :List[str] = XLNetTokenizer
lowerCAmelCase :Union[str, Any] = XLNetTokenizerFast
lowerCAmelCase :Union[str, Any] = True
lowerCAmelCase :int = True
def snake_case__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Optional[Any] = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = """<s>"""
UpperCAmelCase__ : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase) , _lowerCamelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase) , _lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : int = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """<eod>""")
self.assertEqual(len(_lowerCamelCase) , 1006)
def snake_case__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def snake_case__ ( self):
UpperCAmelCase__ : int = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase)
UpperCAmelCase__ : str = tokenizer.tokenize("""This is a test""")
self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase) , [285, 46, 10, 170, 382])
UpperCAmelCase__ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase__ : List[str] = tokenizer.convert_tokens_to_ids(_lowerCamelCase)
self.assertListEqual(_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_lowerCamelCase)
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase)
UpperCAmelCase__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
UpperCAmelCase__ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase)
UpperCAmelCase__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase)
UpperCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
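        # Hedged note on the assertions above: XLNet appends <sep> (id 4) and
        # <cls> (id 3), so a single sequence encodes as tokens + [4, 3] and a
        # pair as tokens_a + [4] + tokens_b + [4, 3].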
@slow
def snake_case__ ( self):
# fmt: off
UpperCAmelCase__ : List[Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , ) | 407 | 1 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[Any] = []
_lowerCAmelCase :List[Any] = []
for i in range(self.num_layers ):
_lowerCAmelCase :List[str] = self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase :str = FlaxResnetBlockaD(
in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_lowerCAmelCase :Dict = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = resnets
_lowerCAmelCase :Tuple = attentions
if self.add_downsample:
_lowerCAmelCase :List[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: List[str]=True ):
_lowerCAmelCase :Optional[int] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_lowerCAmelCase :List[str] = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_lowerCAmelCase :int = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase :Any = self.downsamplers_a(_UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
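# Hedged note: this module appears to mirror diffusers' FlaxCrossAttnDownBlock2D;
# each (resnet, attention) pair appends its hidden states to `output_states`,
# which the matching up block defined below consumes as skip connections via
# jnp.concatenate along the channel axis.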
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :str = []
for i in range(self.num_layers ):
_lowerCAmelCase :Optional[int] = self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase :str = FlaxResnetBlockaD(
in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_lowerCAmelCase :Tuple = resnets
if self.add_downsample:
_lowerCAmelCase :Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Dict , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int=True ):
_lowerCAmelCase :Union[str, Any] = ()
for resnet in self.resnets:
_lowerCAmelCase :Tuple = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase :Dict = self.downsamplers_a(_UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Dict = []
_lowerCAmelCase :Optional[Any] = []
for i in range(self.num_layers ):
_lowerCAmelCase :Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase :Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase :int = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_lowerCAmelCase :str = resnets
_lowerCAmelCase :List[str] = attentions
if self.add_upsample:
_lowerCAmelCase :Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Optional[int] , _UpperCAmelCase: Any , _UpperCAmelCase: int , _UpperCAmelCase: Any , _UpperCAmelCase: str , _UpperCAmelCase: Tuple=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_lowerCAmelCase :str = res_hidden_states_tuple[-1]
_lowerCAmelCase :Tuple = res_hidden_states_tuple[:-1]
_lowerCAmelCase :Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase :Any = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
if self.add_upsample:
_lowerCAmelCase :int = self.upsamplers_a(_UpperCAmelCase )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Union[str, Any] = []
for i in range(self.num_layers ):
_lowerCAmelCase :List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase :Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase :int = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = resnets
if self.add_upsample:
_lowerCAmelCase :Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
_lowerCAmelCase :List[str] = res_hidden_states_tuple[-1]
_lowerCAmelCase :int = res_hidden_states_tuple[:-1]
_lowerCAmelCase :Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase :Union[str, Any] = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
if self.add_upsample:
_lowerCAmelCase :str = self.upsamplers_a(_UpperCAmelCase )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
# there is always at least one resnet
_lowerCAmelCase :Optional[int] = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_lowerCAmelCase :List[str] = []
for _ in range(self.num_layers ):
_lowerCAmelCase :Optional[int] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_lowerCAmelCase :Dict = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_lowerCAmelCase :str = resnets
_lowerCAmelCase :Optional[Any] = attentions
def __call__( self: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict , _UpperCAmelCase: List[Any]=True ):
_lowerCAmelCase :Any = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_lowerCAmelCase :Union[str, Any] = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_lowerCAmelCase :Tuple = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
return hidden_states | 707 |
def z_function( __magic_name__ : str ):
    """simple docstring"""
    z_result :Optional[Any] = [0 for i in range(len(__magic_name__ ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(__magic_name__ ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge :Any = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , __magic_name__ ):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next( i : int , z_result : list[int] , s : str ):
    """simple docstring"""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern( pattern : str , input_str : str ):
    """simple docstring"""
    answer :int = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result :Optional[Any] = z_function(pattern + input_str )
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
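# Hedged usage sketch (mirrors the upstream algorithm's doctests):
#     z_function("zzzz")                  # -> [0, 3, 2, 1]
#     find_pattern("abr", "abracadabra")  # -> 2 ("abr" starts at indices 0 and 7)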
if __name__ == "__main__":
import doctest
doctest.testmod() | 382 | 0 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( UpperCamelCase_ ):
__a = (IPNDMScheduler,)
__a = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[int]= {'''num_train_timesteps''': 1000}
config.update(**lowerCAmelCase )
return config
def UpperCamelCase_ ( self , lowerCAmelCase=0 , **lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__: Optional[int]= kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= self.dummy_sample
SCREAMING_SNAKE_CASE__: Tuple= 0.1 * sample
SCREAMING_SNAKE_CASE__: Any= [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__: Tuple= self.get_scheduler_config(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__: Optional[Any]= dummy_past_residuals[:]
if time_step is None:
SCREAMING_SNAKE_CASE__: List[Any]= scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= scheduler_class.from_pretrained(lowerCAmelCase )
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__: Dict= dummy_past_residuals[:]
SCREAMING_SNAKE_CASE__: Union[str, Any]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: Optional[int]= new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE__: List[str]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: str= new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
def UpperCamelCase_ ( self , lowerCAmelCase=0 , **lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__: Optional[int]= kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= self.dummy_sample
SCREAMING_SNAKE_CASE__: str= 0.1 * sample
SCREAMING_SNAKE_CASE__: Dict= [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_scheduler_config()
SCREAMING_SNAKE_CASE__: Tuple= scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE__: List[Any]= dummy_past_residuals[:]
if time_step is None:
SCREAMING_SNAKE_CASE__: Tuple= scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= scheduler_class.from_pretrained(lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase )
            # copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE__: int= dummy_past_residuals[:]
SCREAMING_SNAKE_CASE__: Optional[Any]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: List[str]= new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE__: Tuple= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: List[Any]= new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: List[str]= self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__: List[Any]= self.get_scheduler_config(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= scheduler_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= 10
SCREAMING_SNAKE_CASE__: Dict= self.dummy_model()
SCREAMING_SNAKE_CASE__: Optional[int]= self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__: str= model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__: List[Any]= kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__: List[Any]= self.get_scheduler_config()
SCREAMING_SNAKE_CASE__: int= scheduler_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= self.dummy_sample
SCREAMING_SNAKE_CASE__: Any= 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase , '''set_timesteps''' ):
scheduler.set_timesteps(lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(lowerCAmelCase , '''set_timesteps''' ):
SCREAMING_SNAKE_CASE__: Optional[int]= num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE__: List[str]= [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE__: List[Any]= dummy_past_residuals[:]
SCREAMING_SNAKE_CASE__: List[Any]= scheduler.timesteps[5]
SCREAMING_SNAKE_CASE__: str= scheduler.timesteps[6]
SCREAMING_SNAKE_CASE__: List[str]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: Any= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
SCREAMING_SNAKE_CASE__: Union[str, Any]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: Any= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self ) -> str:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase , time_step=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCAmelCase , time_step=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.full_loop()
SCREAMING_SNAKE_CASE__: int= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 2540529 ) < 10
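# Hedged standalone sketch of the loop these tests exercise (`model` here is a
# stand-in denoiser, not a real UNet):
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample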
| 64 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@register_to_config
def __init__(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False , ):
super().__init__()
A_ : Tuple = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
A_ : List[str] = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
A_ : Any = False
A_ : Tuple = nn.Dropout(p=lowerCAmelCase_ )
A_ : List[str] = TaConfig(
vocab_size=lowerCAmelCase_ , d_model=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , feed_forward_proj=lowerCAmelCase_ , is_decoder=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , )
A_ : Optional[Any] = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
A_ : Tuple = TaBlock(lowerCAmelCase_ )
self.encoders.append(lowerCAmelCase_ )
A_ : Any = TaLayerNorm(lowerCAmelCase_ )
A_ : Union[str, Any] = nn.Dropout(p=lowerCAmelCase_ )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : List[Any] = self.token_embedder(lowerCAmelCase_ )
A_ : Optional[Any] = encoder_input_tokens.shape[1]
A_ : Any = torch.arange(lowerCAmelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase_ )
A_ : Optional[Any] = self.dropout_pre(lowerCAmelCase_ )
        # invert the attention mask
A_ : int = encoder_input_tokens.size()
A_ : Optional[Any] = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ )
for lyr in self.encoders:
A_ : int = lyr(lowerCAmelCase_ , lowerCAmelCase_ )[0]
A_ : List[str] = self.layer_norm(lowerCAmelCase_ )
return self.dropout_post(lowerCAmelCase_ ), encoder_inputs_mask
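# Hedged shape note: for encoder_input_tokens of shape (batch, seq_len), the
# forward pass above returns (hidden_states, encoder_inputs_mask) with
# hidden_states of shape (batch, seq_len, d_model).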
| 180 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
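# Hedged CLI sketch of what the parser above dispatches (subcommands taken from
# the imports; the script name is illustrative):
#     accelerate config
#     accelerate env
#     accelerate launch train.py
#     accelerate test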
| 70 | from math import sqrt
def solution( __lowercase : int = 1_00_00_00 ):
    '''simple docstring'''
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowercase ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
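# Hedged sanity checks (figures from the Project Euler 86 statement: M = 100 is
# the least size at which the cuboid count first exceeds two thousand, and the
# one-million threshold is first passed at M = 1818):
#     solution(2000)       # -> 100
#     solution(1_000_000)  # -> 1818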
| 70 | 1 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = ['''input_values''', '''attention_mask''']
def __init__( self : Tuple , _A : int = 1 , _A : int = 1_6000 , _A : float = 0.0 , _A : bool = False , _A : int = 80 , _A : int = 16 , _A : int = 64 , _A : str = "hann_window" , _A : float = 1.0 , _A : float = 80 , _A : float = 7600 , _A : float = 1e-10 , _A : int = 2 , _A : bool = True , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = do_normalize
__SCREAMING_SNAKE_CASE : Optional[Any] = return_attention_mask
__SCREAMING_SNAKE_CASE : Optional[Any] = num_mel_bins
__SCREAMING_SNAKE_CASE : Dict = hop_length
__SCREAMING_SNAKE_CASE : Any = win_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = win_function
__SCREAMING_SNAKE_CASE : str = frame_signal_scale
__SCREAMING_SNAKE_CASE : Tuple = fmin
__SCREAMING_SNAKE_CASE : Any = fmax
__SCREAMING_SNAKE_CASE : Dict = mel_floor
__SCREAMING_SNAKE_CASE : Union[str, Any] = reduction_factor
__SCREAMING_SNAKE_CASE : List[str] = win_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : List[Any] = hop_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : Union[str, Any] = optimal_fft_length(self.sample_size )
__SCREAMING_SNAKE_CASE : str = (self.n_fft // 2) + 1
__SCREAMING_SNAKE_CASE : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(_A , np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
__SCREAMING_SNAKE_CASE : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__SCREAMING_SNAKE_CASE : Any = padding_value
normed_input_values.append(_A )
else:
__SCREAMING_SNAKE_CASE : int = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase__ ( self : Any , _A : np.ndarray , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = spectrogram(
_A , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self : Dict , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , **_A : str , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
__SCREAMING_SNAKE_CASE : str = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if audio_target is not None:
__SCREAMING_SNAKE_CASE : List[Any] = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
if inputs is None:
return inputs_target
else:
__SCREAMING_SNAKE_CASE : str = inputs_target['''input_values''']
__SCREAMING_SNAKE_CASE : Dict = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
__SCREAMING_SNAKE_CASE : Tuple = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self : Tuple , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = False , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , **_A : str , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = isinstance(_A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE : Tuple = [np.asarray(_A , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Tuple = speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_size
# convert into correct format for padding
if is_target:
__SCREAMING_SNAKE_CASE : Tuple = [self._extract_mel_features(_A ) for waveform in speech]
__SCREAMING_SNAKE_CASE : Tuple = BatchFeature({'''input_values''': features} )
__SCREAMING_SNAKE_CASE : Any = self.num_mel_bins
else:
__SCREAMING_SNAKE_CASE : Dict = BatchFeature({'''input_values''': speech} )
__SCREAMING_SNAKE_CASE : Dict = self.pad(
_A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , )
__SCREAMING_SNAKE_CASE : List[Any] = feature_size_hack
# convert input values to correct format
__SCREAMING_SNAKE_CASE : str = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = [np.asarray(_A , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_A , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
__SCREAMING_SNAKE_CASE : List[Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Any = input_values.astype(np.floataa )
# convert attention_mask to correct format
__SCREAMING_SNAKE_CASE : List[str] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(_A , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__SCREAMING_SNAKE_CASE : Optional[Any] = (
attention_mask
if self._get_padding_strategies(_A , max_length=_A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__SCREAMING_SNAKE_CASE : List[str] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=_A , padding_value=self.padding_value )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : str = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__SCREAMING_SNAKE_CASE : int = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
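
# Usage sketch: the fragment above omits the class definition, so the class
# name and defaults below are assumptions based on the public transformers API
# this feature extractor mirrors (a 16 kHz SpeechT5 checkpoint).
#
#   import numpy as np
#   from transformers import SpeechT5FeatureExtractor
#
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16_000, dtype=np.float32)        # 1 s of silence at 16 kHz
#   inputs = extractor(audio=waveform, sampling_rate=16_000)          # waveform inputs
#   targets = extractor(audio_target=waveform, sampling_rate=16_000)  # log-mel targets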
| 74 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a_ ( ) -> Optional[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a_ ( ) -> Dict:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('https://huggingface.co' )
| 686 | 0 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = args.pruning_method
_snake_case = args.threshold
_snake_case = args.model_name_or_path.rstrip("/" )
_snake_case = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_snake_case = torch.load(os.path.join(__lowerCAmelCase , "pytorch_model.bin" ) )
_snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_snake_case = MagnitudeBinarizer.apply(inputs=__lowerCAmelCase , threshold=__lowerCAmelCase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = TopKBinarizer.apply(__lowerCAmelCase , __lowerCAmelCase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = ThresholdBinarizer.apply(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
                _snake_case , _snake_case = -0.1, 1.1
_snake_case = torch.sigmoid(__lowerCAmelCase )
_snake_case = s * (r - l) + l
_snake_case = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
_snake_case = os.path.join(
os.path.dirname(__lowerCAmelCase ) , f'''bertarized_{os.path.basename(__lowerCAmelCase )}''' )
if not os.path.isdir(__lowerCAmelCase ):
shutil.copytree(__lowerCAmelCase , __lowerCAmelCase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
__magic_name__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
__magic_name__ : Union[str, Any] = parser.parse_args()
main(args)
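
# Minimal sketch of the magnitude criterion driving the "magnitude" branch above.
# The exact semantics of emmental's MagnitudeBinarizer.apply are assumed here
# (keep the `threshold` fraction, taken as a value in (0, 1], of weights with the
# largest absolute value); this helper is illustrative only.
def _magnitude_mask_sketch(tensor, threshold):
    k = max(1, int(threshold * tensor.numel()))
    # the k-th largest |w| is the cutoff; weights at or above it survive
    cutoff = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    return (tensor.abs() >= cutoff).to(tensor.dtype)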
| 718 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : Any = ReformerTokenizer
UpperCAmelCase__ : List[str] = ReformerTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[Any] = True
def UpperCamelCase( self ):
super().setUp()
_snake_case = ReformerTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ):
_snake_case = "<s>"
_snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowerCamelCase ) , 1_000 )
def UpperCamelCase( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase( self ):
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = "I was born in 92000, and this is falsé."
_snake_case = tokenizer.tokenize(lowerCamelCase )
_snake_case = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
_snake_case = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
_snake_case = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCamelCase )
_snake_case = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
# Simple input
_snake_case = "This is a simple input"
_snake_case = ["This is a simple input 1", "This is a simple input 2"]
_snake_case = ("This is a simple input", "This is a pair")
_snake_case = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Simple input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" , )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_snake_case = ReformerTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
_snake_case = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [285, 46, 10, 170, 382] , )
_snake_case = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase( self ):
_snake_case = "Hello World!"
_snake_case = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def UpperCamelCase( self ):
_snake_case = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@require_torch
@slow
def UpperCamelCase( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case = " ".join(lowerCamelCase )
_snake_case = self.big_tokenizer.encode_plus(lowerCamelCase , return_tensors="pt" )
_snake_case = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case = encoded_sequence["input_ids"].shape
_snake_case = ReformerModel(lowerCamelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase )
model(**lowerCamelCase )
@slow
def UpperCamelCase( self ):
# fmt: off
_snake_case = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowerCamelCase , sequences=lowerCamelCase , )
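
# Round-trip sketch against the same public checkpoint the tests above exercise
# (network access required; the ids come from the `Hello World!` test case):
#
#   from transformers import ReformerTokenizer
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   tok.encode("Hello World!")                    # -> [126, 32, 262, 152, 38, 72, 287]
#   tok.decode([126, 32, 262, 152, 38, 72, 287])  # expected to give back "Hello World!"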
| 368 | 0 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> list[str]:
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
_UpperCAmelCase = number_of_bytes // partitions
_UpperCAmelCase = []
for i in range(__snake_case ):
_UpperCAmelCase = i * bytes_per_partition + 1
_UpperCAmelCase = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
return allocation_list
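
# Worked example (the call name below is hypothetical, since the definition
# above is obfuscated): 100 bytes over 4 partitions yields four inclusive,
# 1-based ranges of 25 bytes each.
#
#   allocate(100, 4)  # -> ['1-25', '26-50', '51-75', '76-100']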
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
a = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 518 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _snake_case ( _UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE : List[str] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
SCREAMING_SNAKE_CASE : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
SCREAMING_SNAKE_CASE : Tuple = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE : Dict = False
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 32
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 32
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 1_00
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase = UNetaDConditionModel(**A_ )
return model
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.dummy_unet
lowerCAmelCase = self.dummy_movq
lowerCAmelCase = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCAmelCase = DDIMScheduler(**A_ )
lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
# create init_image
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((2_56, 2_56) )
# create hint
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(A_ )
else:
lowerCAmelCase = torch.Generator(device=A_ ).manual_seed(A_ )
lowerCAmelCase = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**A_ )
lowerCAmelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
lowerCAmelCase = pipe(**self.get_dummy_inputs(A_ ) )
lowerCAmelCase = output.images
lowerCAmelCase = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase = init_image.resize((5_12, 5_12) )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCAmelCase = torch.from_numpy(np.array(A_ ) ).float() / 2_55.0
lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCAmelCase = 'A robot, 4k photo'
lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
lowerCAmelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase = pipe_prior(
A_ , image=A_ , strength=0.85 , generator=A_ , negative_prompt='' , ).to_tuple()
lowerCAmelCase = pipeline(
image=A_ , image_embeds=A_ , negative_image_embeds=A_ , hint=A_ , generator=A_ , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='np' , )
lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(A_ , A_ )
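
# Condensed sketch of the two-stage flow the slow test drives: the prior turns
# (prompt, image) into image embeddings, and the controlnet img2img decoder turns
# those embeddings plus a depth hint back into pixels (checkpoints as in the test;
# GPU and model downloads required):
#
#   image_emb, zero_emb = pipe_prior(prompt, image=init_image, strength=0.85,
#                                    negative_prompt="").to_tuple()
#   result = pipeline(image=init_image, image_embeds=image_emb,
#                     negative_image_embeds=zero_emb, hint=hint,
#                     num_inference_steps=100, height=512, width=512,
#                     strength=0.5, output_type="np").images[0]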
| 705 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if not conversation_id:
lowerCAmelCase = uuid.uuida()
if past_user_inputs is None:
lowerCAmelCase = []
if generated_responses is None:
lowerCAmelCase = []
lowerCAmelCase = conversation_id
lowerCAmelCase = past_user_inputs
lowerCAmelCase = generated_responses
lowerCAmelCase = text
def __eq__( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
lowerCAmelCase = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
lowerCAmelCase = text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase = None
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
self.generated_responses.append(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
lowerCAmelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
lowerCAmelCase = 'user' if is_user else 'bot'
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
a_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class _snake_case ( a_ ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase = self.tokenizer.eos_token
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = {}
lowerCAmelCase = {}
lowerCAmelCase = {}
if min_length_for_response is not None:
lowerCAmelCase = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_SCREAMING_SNAKE_CASE )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = super().__call__(_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=32 ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
                F'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
lowerCAmelCase = self.tokenizer._build_conversation_input_ids(_SCREAMING_SNAKE_CASE )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase = self._legacy_parse_and_tokenize(_SCREAMING_SNAKE_CASE )
if self.framework == "pt":
lowerCAmelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=10 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
lowerCAmelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
lowerCAmelCase = max_length - minimum_tokens
lowerCAmelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase = model_inputs['attention_mask'][:, -trim:]
lowerCAmelCase = model_inputs.pop('conversation' )
lowerCAmelCase = max_length
lowerCAmelCase = self.model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.model.config.is_encoder_decoder:
lowerCAmelCase = 1
else:
lowerCAmelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
lowerCAmelCase = model_outputs['output_ids']
lowerCAmelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(_SCREAMING_SNAKE_CASE )
return conversation
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.tokenizer.eos_token_id
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > self.tokenizer.model_max_length:
lowerCAmelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
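
# Usage sketch through the public transformers API this pipeline backs (the
# checkpoint name is an assumption; any conversational checkpoint works, and a
# model download is required):
#
#   from transformers import Conversation, pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Hi, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])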
| 514 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1_000 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__UpperCamelCase :List[str] = n - 1
__UpperCamelCase :Union[str, Any] = 0
while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
__UpperCamelCase :int = 0
while count < prec:
__UpperCamelCase :int = random.randint(2 , n - 1 )
__UpperCamelCase :Union[str, Any] = bin_exp_mod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if b != 1:
__UpperCamelCase :Any = True
for _ in range(SCREAMING_SNAKE_CASE ):
if b == n - 1:
__UpperCamelCase :int = False
break
__UpperCamelCase :Union[str, Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
__lowercase = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
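
# Spot checks (probabilistic, but with the default prec=1000 rounds the failure
# probability is negligible); `is_prime_big` is the name the __main__ block calls:
#
#   is_prime_big(97)   # -> True
#   is_prime_big(100)  # -> False
#   is_prime_big(2)    # -> True (covered by the n % 2 == 0 early return)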
| 167 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for char in word:
__UpperCamelCase :str = ord(SCREAMING_SNAKE_CASE )
if not _is_chinese_char(SCREAMING_SNAKE_CASE ):
return 0
return 1
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = set()
for token in tokens:
__UpperCamelCase :Tuple = len(SCREAMING_SNAKE_CASE ) > 1 and is_chinese(SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(SCREAMING_SNAKE_CASE )
__UpperCamelCase :int = list(SCREAMING_SNAKE_CASE )
return word_list
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
__UpperCamelCase :Dict = max([len(SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
__UpperCamelCase :str = bert_tokens
__UpperCamelCase , __UpperCamelCase :int = 0, len(SCREAMING_SNAKE_CASE )
while start < end:
__UpperCamelCase :Optional[int] = True
if is_chinese(bert_word[start] ):
__UpperCamelCase :Dict = min(end - start , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , 1 , -1 ):
__UpperCamelCase :int = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__UpperCamelCase :Union[str, Any] = '''##''' + bert_word[j]
__UpperCamelCase :Dict = start + i
__UpperCamelCase :Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ):
__UpperCamelCase :List[str] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
__UpperCamelCase :int = [get_chinese_word(SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ):
__UpperCamelCase :List[str] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = []
for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :str = []
for id in input_ids:
__UpperCamelCase :Dict = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE )
input_tokens.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = add_sub_symbol(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
__UpperCamelCase :Union[str, Any] = token[2:]
# save chinese tokens' pos
if len(SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE ) ):
ref_id.append(SCREAMING_SNAKE_CASE )
ref_ids.append(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
return ref_ids
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
__UpperCamelCase :Any = f.readlines()
__UpperCamelCase :str = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__UpperCamelCase :Optional[Any] = LTP(args.ltp ) # faster in GPU device
__UpperCamelCase :Union[str, Any] = BertTokenizer.from_pretrained(args.bert )
__UpperCamelCase :Any = prepare_ref(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
__UpperCamelCase :Optional[Any] = [json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' for ref in ref_ids]
f.writelines(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
__lowercase = parser.parse_args()
main(args)
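
# Example invocation (the script filename is hypothetical; the paths are the
# argparse defaults declared above):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt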
| 167 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE = 1000 ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ = 1, 1
UpperCAmelCase_ = 2
while True:
UpperCAmelCase_ = 0
UpperCAmelCase_ = fa + fa
UpperCAmelCase_ , UpperCAmelCase_ = fa, f
index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
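
# Sanity checks for the Project Euler 25 semantics above (`solution` is the name
# the __main__ guard calls): the first Fibonacci term with 3 digits is the 12th
# term (F(12) = 144), and the first with 1000 digits is term 4782.
#
#   solution(3)     # -> 12
#   solution(1000)  # -> 4782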
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = 'xlm-roberta'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
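
# Usage sketch through the public transformers API this config mirrors
# (XLMRobertaConfig is the released name for the class defined above):
#
#   from transformers import XLMRobertaConfig
#   cfg = XLMRobertaConfig(num_hidden_layers=6)
#   cfg.model_type         # -> "xlm-roberta"
#   cfg.num_hidden_layers  # -> 6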
| 23 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'deta'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: int=900 , _SCREAMING_SNAKE_CASE: str=2048 , _SCREAMING_SNAKE_CASE: Any=6 , _SCREAMING_SNAKE_CASE: Tuple=2048 , _SCREAMING_SNAKE_CASE: Optional[Any]=8 , _SCREAMING_SNAKE_CASE: Union[str, Any]=6 , _SCREAMING_SNAKE_CASE: str=1024 , _SCREAMING_SNAKE_CASE: List[str]=8 , _SCREAMING_SNAKE_CASE: List[Any]=0.0 , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: List[str]="relu" , _SCREAMING_SNAKE_CASE: Any=256 , _SCREAMING_SNAKE_CASE: Tuple=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=0.0 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.02 , _SCREAMING_SNAKE_CASE: Any=1.0 , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: Optional[int]=False , _SCREAMING_SNAKE_CASE: int="sine" , _SCREAMING_SNAKE_CASE: Any=5 , _SCREAMING_SNAKE_CASE: Optional[int]=4 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Any=300 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: str=1 , _SCREAMING_SNAKE_CASE: int=5 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1 , _SCREAMING_SNAKE_CASE: Any=1 , _SCREAMING_SNAKE_CASE: Any=5 , _SCREAMING_SNAKE_CASE: int=2 , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: List[str]=0.25 , **_SCREAMING_SNAKE_CASE: int , ) -> Dict:
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
__lowerCAmelCase : Any = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
else:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[str] = backbone_config.pop("model_type")
__lowerCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__lowerCAmelCase : Optional[Any] = config_class.from_dict(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = backbone_config
__lowerCAmelCase : List[Any] = num_queries
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Optional[Any] = d_model
__lowerCAmelCase : str = encoder_ffn_dim
__lowerCAmelCase : Union[str, Any] = encoder_layers
__lowerCAmelCase : int = encoder_attention_heads
__lowerCAmelCase : Optional[int] = decoder_ffn_dim
__lowerCAmelCase : int = decoder_layers
__lowerCAmelCase : List[str] = decoder_attention_heads
__lowerCAmelCase : Optional[Any] = dropout
__lowerCAmelCase : str = attention_dropout
__lowerCAmelCase : Tuple = activation_dropout
__lowerCAmelCase : Tuple = activation_function
__lowerCAmelCase : Dict = init_std
__lowerCAmelCase : Optional[Any] = init_xavier_std
__lowerCAmelCase : Any = encoder_layerdrop
__lowerCAmelCase : Optional[int] = auxiliary_loss
__lowerCAmelCase : Any = position_embedding_type
# deformable attributes
__lowerCAmelCase : List[str] = num_feature_levels
__lowerCAmelCase : int = encoder_n_points
__lowerCAmelCase : List[Any] = decoder_n_points
__lowerCAmelCase : Optional[Any] = two_stage
__lowerCAmelCase : Optional[int] = two_stage_num_proposals
__lowerCAmelCase : Any = with_box_refine
__lowerCAmelCase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True.")
# Hungarian matcher
__lowerCAmelCase : Dict = class_cost
__lowerCAmelCase : Optional[Any] = bbox_cost
__lowerCAmelCase : Tuple = giou_cost
# Loss coefficients
__lowerCAmelCase : str = mask_loss_coefficient
__lowerCAmelCase : List[Any] = dice_loss_coefficient
__lowerCAmelCase : List[Any] = bbox_loss_coefficient
__lowerCAmelCase : Optional[int] = giou_loss_coefficient
__lowerCAmelCase : List[str] = eos_coefficient
__lowerCAmelCase : int = focal_alpha
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
@property
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
return self.d_model
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = copy.deepcopy(self.__dict__)
__lowerCAmelCase : Union[str, Any] = self.backbone_config.to_dict()
__lowerCAmelCase : Dict = self.__class__.model_type
        return output
| 293 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: float , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: bool = False , ) -> Any:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : str = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = False
__lowerCAmelCase : Optional[Any] = nn.Dropout(p=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = TaConfig(
vocab_size=_SCREAMING_SNAKE_CASE , d_model=_SCREAMING_SNAKE_CASE , num_heads=_SCREAMING_SNAKE_CASE , d_kv=_SCREAMING_SNAKE_CASE , d_ff=_SCREAMING_SNAKE_CASE , dropout_rate=_SCREAMING_SNAKE_CASE , feed_forward_proj=_SCREAMING_SNAKE_CASE , is_decoder=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = nn.ModuleList()
for lyr_num in range(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = TaBlock(_SCREAMING_SNAKE_CASE)
self.encoders.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = TaLayerNorm(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = nn.Dropout(p=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.token_embedder(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = encoder_input_tokens.shape[1]
__lowerCAmelCase : List[Any] = torch.arange(_SCREAMING_SNAKE_CASE , device=encoder_input_tokens.device)
x += self.position_encoding(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.dropout_pre(_SCREAMING_SNAKE_CASE)
        # invert the attention mask
__lowerCAmelCase : List[Any] = encoder_input_tokens.size()
__lowerCAmelCase : Any = self.get_extended_attention_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
for lyr in self.encoders:
__lowerCAmelCase : Union[str, Any] = lyr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)[0]
__lowerCAmelCase : int = self.layer_norm(_SCREAMING_SNAKE_CASE)
        return self.dropout_post(_SCREAMING_SNAKE_CASE), encoder_inputs_mask
| 293 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
UpperCamelCase = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class lowerCAmelCase_ ( __UpperCAmelCase ):
_UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ElectraTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars
):
_lowercase : Optional[Any] = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) )
_lowercase : str = do_lower_case
_lowercase : List[str] = strip_accents
_lowercase : Union[str, Any] = tokenize_chinese_chars
_lowercase : List[Any] = normalizer_class(**_lowerCAmelCase )
_lowercase : List[str] = do_lower_case
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowercase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : List[Any] = [self.sep_token_id]
_lowercase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Tuple = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
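
# Usage sketch through the public fast tokenizer this file defines (network
# access is needed to fetch the checkpoint; the printed ids are illustrative):
#
#   from transformers import ElectraTokenizerFast
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   enc = tok("hello world")
#   print(enc["input_ids"])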
| 717 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
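

# Illustration (not part of the original test file): wiring the bbox channel
# through a forward pass outside the test harness. The token ids are meant to
# spell "[CLS] hello world [SEP]" in the bert-base-uncased vocabulary, and the
# box values are made-up examples in LayoutLM's 0-1000 normalised coordinate
# space; LayoutLM expects exactly one box per token.
def _example_layoutlm_forward():
    input_ids = tf.convert_to_tensor([[101, 7_592, 2_088, 102]])
    bbox = tf.convert_to_tensor(
        [[[0, 0, 0, 0], [637, 773, 693, 782], [698, 773, 733, 782], [1_000, 1_000, 1_000, 1_000]]]
    )
    model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
    return model(input_ids=input_ids, bbox=bbox).last_hidden_state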
| 677 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
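
# Illustration (not part of the original __init__): how the lazy structure above
# behaves from user code. Importing the class below triggers the first real import
# of modeling_table_transformer; until then only this stub module is loaded.
# The checkpoint name is an assumption for the sketch.
#
#     from transformers import TableTransformerForObjectDetection
#
#     model = TableTransformerForObjectDetection.from_pretrained(
#         "microsoft/table-transformer-detection"
#     )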
| 497 |
"""Recursive search for the longest non-decreasing subsequence of a list."""
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
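
# Illustration (not part of the original file): the function above on a classic
# input; the comment shows the subsequence this implementation returns.
if __name__ == "__main__":
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]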
| 497 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    """Mock download manager that serves local dummy data instead of real downloads."""

    dummy_data_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False
    def __init__(
        self,
        dataset_name: str,
        cache_dir: str,
        version: Union[Version, str],
        config: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def lowercase ( self : Tuple ):
if self._dummy_file is None:
snake_case__ : Tuple = self.download_dummy_data()
return self._dummy_file
@property
def lowercase ( self : Dict ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowercase ( self : str ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowercase ( self : List[Any] ):
snake_case__ : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Dict = cached_path(
__a , cache_dir=self.cache_dir , extract_compressed_file=__a , force_extract=__a )
return os.path.join(__a , self.dummy_file_name )
@property
def lowercase ( self : Any ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowercase ( self : Dict ):
if self._bucket_url is None:
snake_case__ : Optional[int] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowercase ( self : Any ):
        # return full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowercase ( self : Optional[int] , __a : Union[str, Any] , *__a : Optional[int] ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : Any = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__a , __a ):
return self.create_dummy_data_dict(__a , __a )
elif isinstance(__a , (list, tuple) ):
return self.create_dummy_data_list(__a , __a )
else:
return self.create_dummy_data_single(__a , __a )
def lowercase ( self : Optional[int] , __a : Dict , *__a : Tuple ):
return self.download_and_extract(__a )
def lowercase ( self : List[str] , __a : str , __a : str ):
return self.download_and_extract(__a )
def lowercase ( self : Optional[int] , __a : int , *__a : str , **__a : Optional[Any] ):
return path
def lowercase ( self : Union[str, Any] ):
return {}
def lowercase ( self : Union[str, Any] , __a : Tuple , __a : Optional[Any] ):
snake_case__ : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__a , __a ):
for single_url in single_urls:
download_callback(__a )
else:
snake_case__ : str = single_urls
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__a , __a ):
snake_case__ : Any = [os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) )
snake_case__ : List[str] = value
# make sure that values are unique
if all(isinstance(__a , __a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : Optional[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowercase ( self : List[str] , __a : str , __a : Any ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : int = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __a ) ) for url in data_url )
snake_case__ : Optional[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[Any] = [data_url[0]] * len(__a )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Optional[Any] = os.path.join(__a , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__a )
return dummy_data_list
def lowercase ( self : Any , __a : Any , __a : Union[str, Any] ):
for download_callback in self.download_callbacks:
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Optional[int] = os.path.join(__a , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowercase ( self : Dict ):
pass
def lowercase ( self : str ):
pass
def lowercase ( self : Optional[Any] , __a : Optional[Any] ):
def _iter_archive_members(__a : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : str = Path(self.dummy_file ).parent
snake_case__ : Optional[Any] = path.relative_to(__a )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Union[str, Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__a )
snake_case__ : List[str] = Path(__a )
snake_case__ : Dict = _iter_archive_members(__a ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__a ).as_posix(), file_path.open("""rb""" )
def lowercase ( self : Union[str, Any] , __a : Union[str, Any] ):
if not isinstance(__a , __a ):
snake_case__ : int = [paths]
for path in paths:
if os.path.isfile(__a ):
if os.path.basename(__a ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__a ):
if os.path.basename(__a ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__a ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__a , __a )
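

# Illustration (not part of the original file): the URL-to-filename rule the
# create_dummy_data_* methods above rely on, shown in isolation. The URL is a
# made-up example; note how the query string is percent-encoded into the name.
if __name__ == "__main__":
    import urllib.parse
    from pathlib import Path

    url = "https://example.com/data/train.csv?version=2"
    print(urllib.parse.quote_plus(Path(url).name))  # train.csv%3Fversion%3D2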
| 127 |
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish notation) expression given as a token list."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
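
# Illustration (not part of the original file): (2 + 1) * 3 written in postfix.
if __name__ == "__main__":
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))  # 9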
| 127 | 1 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
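
# Illustration (not part of the original metric file): exercising precision_at_10
# above on tiny synthetic data. The second matrix holds near-copies of the first,
# so every query should retrieve its own counterpart and the score should be 1.0.
if __name__ == "__main__":
    _rng = np.random.default_rng(seed=0)
    _en = _rng.normal(size=(32, 8))
    _in = _en + 0.01 * _rng.normal(size=(32, 8))
    print(precision_at_10(_en, _in))  # expected: 1.0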
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 69 |
"""simple docstring"""
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peak (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
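
# Illustration (not part of the original file): constructing one of the filters
# above and streaming a few samples through it. Assumes the IIRFilter class from
# audio_filters.iir_filter exposes a process(sample) method, as it does in the
# package this module comes from.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    print([round(lowpass.process(sample), 5) for sample in (0.0, 1.0, 0.5, -0.25)])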
| 227 | 0 |
"""Breadth-first search to find the shortest path in an unweighted graph."""
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Traverse the graph breadth-first, recording each vertex's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the source-to-target path as an arrow-joined string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    try:
        print(g.shortest_path("Foo"))
    except ValueError as error:
        print(error)  # no path from G to Foo
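
# Illustration (not part of the original file): the same traversal using
# collections.deque, whose popleft() is O(1) compared with the O(n) list.pop(0)
# inside breath_first_search above. Prints the parent mapping rooted at "G".
if __name__ == "__main__":
    from collections import deque

    parent_map: dict[str, str | None] = {"G": None}
    bfs_queue = deque(["G"])
    while bfs_queue:
        current = bfs_queue.popleft()
        for neighbour in graph[current]:
            if neighbour not in parent_map:
                parent_map[neighbour] = current
                bfs_queue.append(neighbour)
    print(parent_map)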
| 27 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'

# import ONNX file
if not os.path.exists('temp_engine'):
    os.makedirs('temp_engine')

EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, 'rb') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, 'wb') as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []

    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['validation']

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]

    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info('  Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
    logger.info('Total Number of Inference = %d', niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)

    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
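
    # Illustration (not part of the original script): log the raw buffer size that
    # the binding_nbytes helper above computes for every binding of the engine,
    # under the INPUT_SHAPE fixed earlier; purely informational.
    for binding in engine:
        logger.info('binding %s needs %d bytes', binding, binding_nbytes(binding))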
| 27 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')


def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : List[str]="text" ):
'''simple docstring'''
if model_type == "text":
__lowerCamelCase : Union[str, Any] =BarkSemanticModel
__lowerCamelCase : Union[str, Any] =BarkSemanticConfig
__lowerCamelCase : int =BarkSemanticGenerationConfig
elif model_type == "coarse":
__lowerCamelCase : List[Any] =BarkCoarseModel
__lowerCamelCase : int =BarkCoarseConfig
__lowerCamelCase : Tuple =BarkCoarseGenerationConfig
elif model_type == "fine":
__lowerCamelCase : Optional[Any] =BarkFineModel
__lowerCamelCase : str =BarkFineConfig
__lowerCamelCase : Dict =BarkFineGenerationConfig
else:
raise NotImplementedError()
__lowerCamelCase : Tuple =F'{model_type}_small' if use_small else model_type
__lowerCamelCase : List[Any] =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(SCREAMING_SNAKE_CASE ):
logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
__lowerCamelCase : Optional[int] =torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
# this is a hack
__lowerCamelCase : Tuple =checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
__lowerCamelCase : Dict =model_args['''vocab_size''']
__lowerCamelCase : int =model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__lowerCamelCase : Dict =model_args.pop('''n_head''' )
__lowerCamelCase : Dict =model_args.pop('''n_embd''' )
__lowerCamelCase : Optional[int] =model_args.pop('''n_layer''' )
__lowerCamelCase : Optional[int] =ConfigClass(**checkpoint['''model_args'''] )
__lowerCamelCase : Any =ModelClass(config=SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[str] =GenerationConfigClass()
__lowerCamelCase : Optional[Any] =model_generation_config
__lowerCamelCase : Any =checkpoint['''model''']
# fixup checkpoint
__lowerCamelCase : Union[str, Any] ='''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(SCREAMING_SNAKE_CASE ):
# replace part of the key with corresponding layer name in HF implementation
__lowerCamelCase : List[Any] =k[len(SCREAMING_SNAKE_CASE ) :]
for old_layer_name in new_layer_name_dict:
__lowerCamelCase : Tuple =new_k.replace(SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] )
__lowerCamelCase : Dict =state_dict.pop(SCREAMING_SNAKE_CASE )
__lowerCamelCase : Any =set(state_dict.keys() ) - set(model.state_dict().keys() )
__lowerCamelCase : Optional[int] ={k for k in extra_keys if not k.endswith('''.attn.bias''' )}
__lowerCamelCase : Optional[int] =set(model.state_dict().keys() ) - set(state_dict.keys() )
__lowerCamelCase : List[Any] ={k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F'extra keys found: {extra_keys}' )
if len(SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F'missing keys: {missing_keys}' )
model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE )
__lowerCamelCase : Union[str, Any] =checkpoint['''best_val_loss'''].item()
logger.info(F'model loaded: {round(n_params/1E6 , 1 )}M params, {round(SCREAMING_SNAKE_CASE , 3 )} loss' )
model.eval()
model.to(SCREAMING_SNAKE_CASE )
del checkpoint, state_dict
return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semantic_config = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarse_acoustic_config = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fine_acoustic_config = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codec_config = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semantic_config, coarse_acoustic_config, fine_acoustic_config, codec_config
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
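    # Example invocation (a sketch; assumes this script is saved as convert_suno_to_hf.py
    # and the output path is illustrative):
    #   python convert_suno_to_hf.py text ./bark-text-hf --is_small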
| 179 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, activation_dropout=0.0, attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
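# Note on atol_for_validation above: it is the absolute tolerance the ONNX export
# machinery uses when comparing the exported graph's outputs against the PyTorch
# model's outputs; 1e-4 is a common choice for encoder-decoder exports.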
| 179 | 1 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
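# Illustrative behaviour (a sketch, not part of the original tests): for
# [10, 22, 9, 33, 21, 50, 41, 60, 80] the longest non-decreasing subsequence
# has length 6, e.g. [10, 22, 33, 41, 60, 80].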
| 715 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 325 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
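# How make_batched normalizes its input (illustrative):
#   a single image            -> [[image]]   (one video with one frame)
#   a flat list of frames     -> [frames]    (one video)
#   a list of lists of frames -> unchanged   (already a batch of videos)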
class VivitImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
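# Minimal usage sketch (assumes `frames` is a list of PIL images; the shape is
# indicative for the default 224x224 crop):
#   processor = VivitImageProcessor()
#   inputs = processor(frames, return_tensors="pt")
#   # inputs["pixel_values"] has shape (1, num_frames, 3, 224, 224)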
| 435 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 435 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
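# The index written above follows the standard sharded-checkpoint layout:
#   {"metadata": {"total_size": <bytes>}, "weight_map": {<param name>: <shard file>, ...}}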
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
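    # Example invocation (a sketch; the default paths above are machine-specific
    # placeholders, so real paths would be passed explicitly):
    #   python convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py \
    #       --nllb_moe_checkpoint_path /path/to/checkpoint_2_300000 \
    #       --pytorch_dump_folder_path ./nllb-moe-54b-hf --dtype float32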
| 700 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns the count of positive integers that are both an n-digit number and an
    nth power, for bases below max_base and powers below max_power
    (Project Euler problem 63).
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 598 | 0 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def _lowerCAmelCase ( __snake_case : Tuple ) -> Optional[int]:
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__A : List[str] = list(range(2 , n + 1 ) )
__A : Any = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 , len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__A : Any = 0
# filters actual prime numbers.
__A : Dict = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( __snake_case : str ) -> Union[str, Any]:
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
__A : List[Any] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( __snake_case : List[Any] ) -> List[str]:
assert isinstance(__snake_case , __snake_case ) and number >= 0, "'number' must been an int and >= 0"
__A : Optional[int] = [] # this list will be returns of the function.
# potential prime number factors.
__A : int = 2
__A : Tuple = number
if number == 0 or number == 1:
ans.append(__snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( __snake_case : str ) -> Optional[int]:
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A : Any = 0
# prime factorization of 'number'
__A : Dict = prime_factorization(__snake_case )
__A : Union[str, Any] = max(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> str:
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A : Tuple = 0
# prime factorization of 'number'
__A : Any = prime_factorization(__snake_case )
__A : Any = min(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Optional[int]:
assert (
isinstance(__snake_case , __snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
__A : List[Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__A : List[str] = get_prime_numbers(__snake_case )
__A : Tuple = len(__snake_case )
# run variable for while-loops.
__A : Optional[int] = 0
__A : Union[str, Any] = None
# exit variable. for break up the loops
__A : str = True
while i < len_pn and loop:
__A : int = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__A : Any = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
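# Euclid's algorithm at work (illustrative): gcd(12, 18) == 6, gcd(7, 5) == 1.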
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : List[Any] ) -> List[Any]:
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__A : Union[str, Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__A : Optional[Any] = prime_factorization(__snake_case )
__A : Union[str, Any] = prime_factorization(__snake_case )
elif numbera == 1 or numbera == 1:
__A : str = []
__A : Union[str, Any] = []
__A : Union[str, Any] = max(__snake_case , __snake_case )
__A : Union[str, Any] = 0
__A : Tuple = 0
__A : List[Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__A : Dict = prime_fac_a.count(__snake_case )
__A : List[str] = prime_fac_a.count(__snake_case )
for _ in range(max(__snake_case , __snake_case ) ):
ans *= n
else:
__A : int = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__A : str = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> List[Any]:
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'number' must been a positive int"
__A : List[Any] = 0
__A : str = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case , __snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : Optional[int] ) -> Dict:
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__A : Any = p_number_a + 1 # jump to the next number
__A : List[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _lowerCAmelCase ( __snake_case : Dict ) -> str:
assert isinstance(__snake_case , __snake_case ) and (n >= 1), "'n' must been int and >= 1"
__A : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Dict:
assert isinstance(__snake_case , __snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
__A : Optional[Any] = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fiba = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fiba
        fiba = tmp

    return ans
| 8 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
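# Minimal usage sketch (assumes an instantiated `processor` and a 1-D numpy
# waveform `audio` sampled at the feature extractor's expected rate):
#   inputs = processor(text=["a dog barking"], audios=audio,
#                      sampling_rate=48_000, return_tensors="pt")
#   # -> tokenizer fields plus "input_features" from the feature extractor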
| 432 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
__snake_case : Optional[Any] = BertForMaskedLM.from_pretrained(args.model_name)
__snake_case : List[Any] = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
__snake_case : Optional[int] = model.state_dict()
__snake_case : List[str] = {}
for w in ["word_embeddings", "position_embeddings"]:
__snake_case : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
__snake_case : Union[str, Any] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
__snake_case : List[Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__snake_case : Tuple = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
__snake_case : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
__snake_case : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
__snake_case : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
__snake_case : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
__snake_case : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
__snake_case : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
__snake_case : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
__snake_case : int = state_dict['cls.predictions.decoder.weight']
__snake_case : Tuple = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
__snake_case : List[str] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
__snake_case : int = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
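
# Example invocation (illustrative; the script filename and output path are
# assumptions, not taken from the original repo):
#
#     python extract.py --model_type bert --model_name bert-base-uncased \
#         --dump_checkpoint serialization_dir/bert-base-uncased_extracted.pth \
#         --vocab_transform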
| 707 |
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for the SHA-256 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message so its bit length is a multiple of 512, appending the length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 zero-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit unsigned value by the given number of bit rotations."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
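
# Quick usage sketch: hashing a short byte string and checking it against the
# well-known SHA-256 test vector for "abc" (this check is not part of the
# original file):
#
#     digest = SHA256(b"abc").hash
#     assert digest == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"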
class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class, using hashlib as the reference implementation."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Hash either a command-line string or the contents of a file with SHA-256."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 433 | 0 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_lowerCAmelCase : List[str] = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_lowerCAmelCase : Optional[int] = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCAmelCase__ :
def __init__( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = WATERMARK_BITS
UpperCAmelCase__ : Dict = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark )
def __a ( self : Optional[Any] , snake_case__ : torch.FloatTensor ):
'''simple docstring'''
# can't encode images that are smaller than 256
if images.shape[-1] < 2_5_6:
return images
UpperCAmelCase__ : str = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase__ : Tuple = [self.encoder.encode(snake_case__ , "dwtDct" ) for image in images]
UpperCAmelCase__ : List[str] = torch.from_numpy(np.array(snake_case__ ) ).permute(0 , 3 , 1 , 2 )
UpperCAmelCase__ : Tuple = torch.clamp(2 * (images / 2_5_5 - 0.5) , min=-1.0 , max=1.0 )
return images
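
# A minimal sketch of how the watermarker might be used (illustrative; assumes
# a batch of generated images already in [-1, 1] NCHW layout):
#
#     watermarker = StableDiffusionXLWatermarker()
#     images = torch.rand(1, 3, 512, 512) * 2 - 1  # stand-in "generated" batch
#     watermarked = watermarker.apply_watermark(images)
#     assert watermarked.shape == images.shape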
| 438 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_lowerCAmelCase : List[str] = {
"""allenai/led-base-16384""": 16_384,
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ =LEDTokenizer
SCREAMING_SNAKE_CASE_ =['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , snake_case__ : str=None , snake_case__ : List[Any]=None , snake_case__ : Dict=None , snake_case__ : List[str]="replace" , snake_case__ : Optional[int]="<s>" , snake_case__ : List[str]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : Any="<pad>" , snake_case__ : Dict="<mask>" , snake_case__ : int=False , snake_case__ : Optional[int]=True , **snake_case__ : List[Any] , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
UpperCAmelCase__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
UpperCAmelCase__ : Dict = getattr(snake_case__ , pre_tok_state.pop("type" ) )
UpperCAmelCase__ : str = add_prefix_space
UpperCAmelCase__ : Any = pre_tok_class(**snake_case__ )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : List[str] = "post_processor"
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
UpperCAmelCase__ : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Optional[Any] = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase__ : Any = tuple(state["cls"] )
UpperCAmelCase__ : Any = False
if state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : List[Any] = True
if state.get("trim_offsets" , snake_case__ ) != trim_offsets:
UpperCAmelCase__ : Optional[int] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : List[str] = getattr(snake_case__ , state.pop("type" ) )
UpperCAmelCase__ : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __a ( self : Any ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __a ( self : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
UpperCAmelCase__ : Dict = value
def __a ( self : str , *snake_case__ : Any , **snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get("is_split_into_words" , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def __a ( self : List[str] , *snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get("is_split_into_words" , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def __a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def __a ( self : str , snake_case__ : List[Any] , snake_case__ : str=None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self : Any , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
'''simple docstring'''
UpperCAmelCase__ : str = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : Optional[int] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : List[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase__ : Any = len(encoded_inputs["global_attention_mask"] ) != len(snake_case__ )
if needs_to_be_padded:
UpperCAmelCase__ : List[str] = len(snake_case__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Dict = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Dict = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
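
# A hedged usage sketch (not part of the original module), assuming the
# "allenai/led-base-16384" checkpoint listed above:
#
#     from transformers import LEDTokenizerFast
#
#     tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer("A very long document ...", return_tensors="pt")
#     # enc["input_ids"] and enc["attention_mask"] are ready for a LED model;
#     # callers typically also build a global_attention_mask marking the first token.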
| 438 | 1 |
"""simple docstring"""
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = 0
lowerCamelCase : str = len(_lowerCamelCase )
for i in range(n - 1 ):
for j in range(i + 1, _lowerCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def UpperCAmelCase ( a_ ):
'''simple docstring'''
if len(_lowerCamelCase ) <= 1:
return arr, 0
lowerCamelCase : Union[str, Any] = len(_lowerCamelCase ) // 2
lowerCamelCase : str = arr[0:mid]
lowerCamelCase : int = arr[mid:]
lowerCamelCase : int = count_inversions_recursive(_lowerCamelCase )
lowerCamelCase : Dict = count_inversions_recursive(_lowerCamelCase )
lowerCamelCase : Union[str, Any] = _count_cross_inversions(_lowerCamelCase, _lowerCamelCase )
lowerCamelCase : Union[str, Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : Optional[int] = 0
while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(_lowerCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(_lowerCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
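
# Worked micro-example for the merge step above (illustrative): with sorted
# halves P = [5, 10] and Q = [2, 11], the first comparison 5 > 2 counts
# len(P) - 0 = 2 cross inversions -- the pairs (5, 2) and (10, 2) -- and the
# merge then produces [2, 5, 10, 11] with no further inversions.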
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 706 |
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( a_ ):
'''simple docstring'''
if num <= 0:
lowerCamelCase : Tuple = F"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(a_ )
lowerCamelCase : Optional[Any] = [True] * (num + 1)
lowerCamelCase : int = []
lowerCamelCase : Dict = 2
lowerCamelCase : List[str] = int(math.sqrt(a_ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(a_ )
# Set multiples of start be False
for i in range(start * start, num + 1, a_ ):
if sieve[i] is True:
lowerCamelCase : Optional[int] = False
start += 1
for j in range(end + 1, num + 1 ):
if sieve[j] is True:
prime.append(a_ )
return prime
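
# Quick sanity check (illustrative; not part of the original script):
#
#     assert prime_sieve(10) == [2, 3, 5, 7]
#     assert prime_sieve(2) == [2]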
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 133 | 0 |