red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence containing only the values in ``colors`` in a single pass
    (Dijkstra's Dutch national flag problem).
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """
    Compute the minimum cost to travel on every day in ``days``, given the
    prices of 1-day, 7-day and 30-day passes in ``costs``.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
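# Editor's usage sketch (not part of the original file): the classic example,
# with passes costing 2 (1-day), 7 (7-day) and 15 (30-day).
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11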
import argparse
import struct
import unittest
class SHA256:
    """
    Class to contain the entire pipeline for the SHA-256 hashing algorithm.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0x6a09_e667,
0xbb67_ae85,
0x3c6e_f372,
0xa54f_f53a,
0x510e_527f,
0x9b05_688c,
0x1f83_d9ab,
0x5be0_cd19,
]
        # Initialize round constants
        self.round_constants = [
0x428a_2f98,
0x7137_4491,
0xb5c0_fbcf,
0xe9b5_dba5,
0x3956_c25b,
0x59f1_11f1,
0x923f_82a4,
0xab1c_5ed5,
0xd807_aa98,
0x1283_5b01,
0x2431_85be,
0x550c_7dc3,
0x72be_5d74,
0x80de_b1fe,
0x9bdc_06a7,
0xc19b_f174,
0xe49b_69c1,
0xefbe_4786,
0x0fc1_9dc6,
0x240c_a1cc,
0x2de9_2c6f,
0x4a74_84aa,
0x5cb0_a9dc,
0x76f9_88da,
0x983e_5152,
0xa831_c66d,
0xb003_27c8,
0xbf59_7fc7,
0xc6e0_0bf3,
0xd5a7_9147,
0x06ca_6351,
0x1429_2967,
0x27b7_0a85,
0x2e1b_2138,
0x4d2c_6dfc,
0x5338_0d13,
0x650a_7354,
0x766a_0abb,
0x81c2_c92e,
0x9272_2c85,
0xa2bf_e8a1,
0xa81a_664b,
0xc24b_8b70,
0xc76c_51a3,
0xd192_e819,
0xd699_0624,
0xf40e_3585,
0x106a_a070,
0x19a4_c116,
0x1e37_6c08,
0x2748_774c,
0x34b0_bcb5,
0x391c_0cb3,
0x4ed8_aa4a,
0x5b9c_ca4f,
0x682e_6ff3,
0x748f_82ee,
0x78a5_636f,
0x84c8_7814,
0x8cc7_0208,
0x90be_fffa,
0xa450_6ceb,
0xbef9_a3f7,
0xc671_78f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data so that its total length is a multiple of 64 bytes."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        """Run the main compression loop over every 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000

                # Rotate the working variables for the next round
                a, b, c, d, e, f, g, h = (
                    ((temp1 + temp2) % 0x1_0000_0000),
                    a,
                    b,
                    c,
                    ((d + temp1) % 0x1_0000_0000),
                    e,
                    f,
                    g,
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit unsigned integer by the given amount."""
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Check the implementation above against hashlib's reference output."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """
    Provides the options 'string' or 'file' to take input
    and prints the calculated SHA-256 hash.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
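# Editor's usage sketch (not part of the original file): hashing a short
# message and cross-checking against hashlib, as the unit test above does.
import hashlib as _hashlib  # local alias to avoid clashing with the test-local import

_msg = b"abc"
assert SHA256(_msg).hash == _hashlib.sha256(_msg).hexdigest()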
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : Subreddit to query
    limit : Number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : Get only the required data in the list
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }

    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compare a library version to some requirement using a given operation.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """
    Compare the current PyTorch version to a given reference with an operation.
    """
    return compare_versions(torch_version, operation, version)
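# Editor's usage sketch (not part of the original file): assuming
# STR_OPERATION_TO_FUNC maps strings such as ">=" to functions like
# operator.ge, callers can gate features on the installed torch version.
# if is_torch_version(">=", "1.12.0"):
#     ...  # use a feature that needs torch >= 1.12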
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our DETR structure.
    """
    # load default config
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
lowerCamelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 188 | 1 |
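# Editor's usage sketch (not part of the original script): invoking the
# conversion programmatically instead of via the CLI. This downloads the
# original torch hub checkpoint, so network access is required; the dump
# folder path is hypothetical.
# convert_detr_checkpoint("detr-resnet-50", pytorch_dump_folder_path="./detr-resnet-50")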
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer  # the number of the layer that exited
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
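# Editor's usage sketch (not part of the original file): in eval mode the
# early-exit head returns extra entries. Assumes the examples-level
# modeling_highway_bert module is importable, as in the imports above.
# config = RobertaConfig.from_pretrained("roberta-base")
# model = DeeRobertaForSequenceClassification(config).eval()
# outputs = model(input_ids)  # (logits, ..., (entropy, highway_entropy), exit_layer)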
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if gpta_config_file == "":
lowercase : Union[str, Any] = GPTaConfig()
else:
lowercase : Tuple = GPTaConfig.from_json_file(_UpperCAmelCase )
lowercase : List[Any] = GPTaModel(_UpperCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
lowercase : Optional[Any] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowercase : Dict = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , _UpperCAmelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_UpperCamelCase: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
_UpperCamelCase: Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 255 |
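# Editor's usage sketch (not part of the original script): converting a local
# TF checkpoint from Python; the paths are hypothetical.
# convert_gpt2_checkpoint_to_pytorch(
#     gpt2_checkpoint_path="./gpt2/model.ckpt",
#     gpt2_config_file="",  # empty string -> default GPT2Config()
#     pytorch_dump_folder_path="./gpt2-pytorch",
# )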
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
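# Editor's usage sketch (not part of the original test file): the three-step
# DiffEdit flow that the integration tests above exercise, shown in isolation.
# Model download and a GPU are required; the prompts are illustrative.
# mask = pipe.generate_mask(image=img, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
# latents = pipe.invert(prompt="a bowl of fruit", image=img, inpaint_strength=0.7).latents
# edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents).images[0]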
"""simple docstring"""
__UpperCamelCase : Optional[Any] = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Convert between metric length units, e.g. length_conversion(1, "meter", "kilometer") == 0.001
    """
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
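# Editor's usage sketch (not part of the original file): 4 km expressed in
# metres via the exponent table above.
assert length_conversion(4, "kilometer", "meter") == 4000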
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Create a mapping function from each choice's string representation to the actual value.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING, default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
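# Editor's usage sketch (not part of the original file): a minimal dataclass
# parsed from the command line with the parser defined above.
# @dataclasses.dataclass
# class TrainingArgs:
#     learning_rate: float = 3e-4
#     epochs: int = 1
#
# parser = HfArgumentParser(TrainingArgs)
# (training_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-3"])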
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")


import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Any = '''wavlm'''
def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=1_28 , lowerCAmelCase__=16 , lowerCAmelCase__=3_20 , lowerCAmelCase__=8_00 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=3_20 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_00 , lowerCAmelCase__=2_56 , lowerCAmelCase__=2_56 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=2_56 , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=5_12 , lowerCAmelCase__=80 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = conv_bias
__lowercase = num_buckets
__lowercase = max_bucket_distance
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = num_ctc_classes
__lowercase = vocab_size
__lowercase = do_stable_layer_norm
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase = num_codevectors_per_group
__lowercase = num_codevector_groups
__lowercase = contrastive_logits_temperature
__lowercase = num_negatives
__lowercase = codevector_dim
__lowercase = proj_codevector_dim
__lowercase = diversity_loss_weight
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# adapter
__lowercase = add_adapter
__lowercase = adapter_kernel_size
__lowercase = adapter_stride
__lowercase = num_adapter_layers
__lowercase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = xvector_output_dim
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
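        # The product of the convolution strides is the overall downsampling
        # factor of the feature extractor; e.g. the default strides
        # (5, 2, 2, 2, 2, 2, 2) give 5 * 2**6 = 320 input samples per frame.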
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 210 | 0 |
'''simple docstring'''
from math import factorial
def solution( UpperCAmelCase_ = 1_0_0 ):
    return sum(int(x ) for x in str(factorial(UpperCAmelCase_ ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
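# Worked example: factorial(10) == 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27.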
| 236 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table ):
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            '\'table\' has to be of square shaped array but got a '
            f'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists' )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
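    # Illustrative check (values chosen so a Doolittle factorisation exists,
    # i.e. all leading principal minors are nonzero): for
    #     table = np.array([[2., 5., 3.], [3., 2., 5.], [1., 3., 8.]])
    # the factors returned by lower_upper_decomposition satisfy
    # np.allclose(lower @ upper, table).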
| 236 | 1 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
'''simple docstring'''
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=64 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Dict = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : List[Any] = use_token_type_ids
_lowerCamelCase : int = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Any = embedding_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : str = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : Any = scope
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : int = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Union[str, Any] = MobileBertModel(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[int] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_lowerCamelCase : Union[str, Any] = model(lowercase , token_type_ids=lowercase )
_lowerCamelCase : List[str] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Union[str, Any] = MobileBertForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : List[Any] = MobileBertForNextSentencePrediction(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : List[str] = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : str = MobileBertForPreTraining(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : int = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Any = MobileBertForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : str = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : List[str] = MobileBertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : str = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Optional[int] = MobileBertForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : List[str] = self.num_choices
_lowerCamelCase : Optional[int] = MobileBertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Tuple = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def A_ ( self , lowercase , lowercase , lowercase=False ):
_lowerCamelCase : Optional[int] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class in get_values(lowercase ):
_lowerCamelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase )
_lowerCamelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def A_ ( self ):
_lowerCamelCase : int = MobileBertModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase )
def A_ ( self ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase )
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase )
def A_ ( self ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase )
def A_ ( self ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase )
def _long_tensor(lowercase__ ):
    return torch.tensor(
        lowercase__ , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Optional[int] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowercase )
_lowerCamelCase : Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
_lowerCamelCase : Any = model(lowercase )[0]
_lowerCamelCase : Tuple = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , lowercase )
_lowerCamelCase : List[Any] = torch.tensor(
[
[
[-2.473_6526E07, 8.269_1656E04, 1.652_1838E05],
[-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00],
[2.604_7359E00, 1.567_7652E00, -1.732_4188E-01],
]
] , device=lowercase , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
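        # Equivalently (sketch, not part of the original assertion): for an
        # expected value e and actual value a, requiring
        #     1 - TOLERANCE <= e / a <= 1 + TOLERANCE
        # is the relative-error test |e - a| <= TOLERANCE * |a| for positive a.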
_lowerCamelCase : Any = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
_lowerCamelCase : str = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 96 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
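# Illustrative usage of the lazy module defined above (the import path and
# name are real transformers exports, but this snippet is a sketch, not part
# of the file):
#
#     from transformers.models.xlm_roberta import XLMRobertaConfig
#
# With _LazyModule, the heavy torch/TF/Flax submodules are only imported the
# first time one of their attributes is accessed, keeping import time low.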
| 249 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list ):
    '''simple docstring'''
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class StableDiffusionLatentUpscalePipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
a = True
@property
def lowercase_ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = (16, 16)
SCREAMING_SNAKE_CASE__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
def lowercase_ ( self : Optional[int] ) -> List[Any]:
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = UNet2DConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=__lowerCamelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=__lowerCamelCase , only_cross_attention=__lowerCamelCase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
SCREAMING_SNAKE_CASE__ = EulerDiscreteScheduler(prediction_type='''sample''' )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=0 ) -> int:
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
SCREAMING_SNAKE_CASE__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1e-3 )
def lowercase_ ( self : Optional[Any] ) -> Any:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def lowercase_ ( self : int ) -> str:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def lowercase_ ( self : int ) -> Optional[int]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowercase_ ( self : Any ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def lowercase_ ( self : List[Any] ) -> str:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def lowercase_ ( self : Any ) -> int:
super().test_save_load_local(expected_max_difference=3e-3 )
def lowercase_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , scheduler_enum.name )
SCREAMING_SNAKE_CASE__ = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase )[0]
outputs.append(__lowerCamelCase )
assert check_same_shape(__lowerCamelCase )
@require_torch_gpu
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Any ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = torch.manual_seed(33 )
        SCREAMING_SNAKE_CASE__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.float16 )
pipe.to('''cuda''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.float16 )
upscaler.to('''cuda''' )
SCREAMING_SNAKE_CASE__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
SCREAMING_SNAKE_CASE__ = pipe(__lowerCamelCase , generator=__lowerCamelCase , output_type='''latent''' ).images
SCREAMING_SNAKE_CASE__ = upscaler(
prompt=__lowerCamelCase , image=__lowerCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=__lowerCamelCase , output_type='''np''' , ).images[0]
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def lowercase_ ( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.float16 )
upscaler.to('''cuda''' )
SCREAMING_SNAKE_CASE__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
SCREAMING_SNAKE_CASE__ = upscaler(
prompt=__lowerCamelCase , image=__lowerCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=__lowerCamelCase , output_type='''np''' , ).images[0]
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 367 |
from __future__ import annotations
def peak(lst ):
    '''simple docstring'''
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
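# Worked example (assuming a strictly rising-then-falling input):
#     peak([1, 3, 5, 7, 6, 2]) returns 7
# Each recursive call halves the search range, so the cost is O(log n).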
| 218 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset ( IterableDataset ):
def __init__( self : Optional[Any] , UpperCAmelCase : int ) -> int:
lowerCamelCase__ : Tuple = data
def __iter__( self : Dict ) -> List[str]:
for element in self.data:
yield element
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase=True ) -> int:
lowerCamelCase__ : Optional[int] = Accelerator(even_batches=_UpperCAmelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False ) -> List[str]:
if iterable:
lowerCamelCase__ : Optional[Any] = DummyIterableDataset(torch.as_tensor(range(_UpperCAmelCase ) ) )
else:
lowerCamelCase__ : Tuple = TensorDataset(torch.as_tensor(range(_UpperCAmelCase ) ) )
lowerCamelCase__ : str = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase )
lowerCamelCase__ : str = accelerator.prepare(_UpperCAmelCase )
return dl
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> str:
lowerCamelCase__ : Any = create_dataloader(accelerator=_UpperCAmelCase , dataset_size=_UpperCAmelCase , batch_size=_UpperCAmelCase )
lowerCamelCase__ : Any = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
lowerCamelCase__ : List[str] = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
lowerCamelCase__ : Tuple = create_accelerator(even_batches=_UpperCAmelCase )
verify_dataloader_batch_sizes(
_UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
_UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = create_accelerator(even_batches=_UpperCAmelCase )
lowerCamelCase__ : Any = torch.nn.Linear(1 , 1 )
lowerCamelCase__ : List[Any] = accelerator.prepare(_UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 )
lowerCamelCase__ : List[str] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(_UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = ddp_model(batch[0].float() )
lowerCamelCase__ : int = output.sum()
loss.backward()
batch_idxs.append(_UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
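# Note (illustrative, not part of the test): `accelerator.join_uneven_inputs`
# wraps torch.distributed's Join context manager, so ranks whose data shards
# run out first keep answering DDP collectives instead of deadlocking the
# ranks that still have batches left.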
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
with warnings.catch_warnings(record=_UpperCAmelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , _UpperCAmelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : List[str] = create_accelerator(even_batches=_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = torch.nn.Linear(1 , 1 )
lowerCamelCase__ : List[Any] = accelerator.prepare(_UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 )
lowerCamelCase__ : List[Any] = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = train_dl.batch_sampler.even_batches
lowerCamelCase__ : Union[str, Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = create_accelerator(even_batches=_UpperCAmelCase )
lowerCamelCase__ : Tuple = torch.nn.Linear(1 , 1 )
lowerCamelCase__ : Tuple = accelerator.prepare(_UpperCAmelCase )
create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=_UpperCAmelCase )
lowerCamelCase__ : int = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ):
lowerCamelCase__ : str = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE ( ) -> Dict:
lowerCamelCase__ : int = create_accelerator()
lowerCamelCase__ : str = torch.nn.Linear(1 , 1 )
lowerCamelCase__ : Any = accelerator.prepare(_UpperCAmelCase )
create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=_UpperCAmelCase )
with warnings.catch_warnings(record=_UpperCAmelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ):
pass
assert issubclass(w[-1].category , _UpperCAmelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
lowerCamelCase__ : str = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
lowerCamelCase__ : Dict = accelerator.state.distributed_type
lowerCamelCase__ : List[Any] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_UpperCAmelCase )
lowerCamelCase__ : Dict = original_state
if __name__ == "__main__":
main()
| 50 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__A : List[Any] = True
except ImportError:
__A : int = False
__A : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase_ ( args : Namespace ):
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def __lowercase ( lowerCamelCase : ArgumentParser ) -> int:
lowerCAmelCase_ : Optional[int] = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=lowerCamelCase , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=lowerCamelCase , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=lowerCamelCase )
def __init__( self : List[str] , lowerCamelCase : bool , lowerCamelCase : str , lowerCamelCase : Any=None , *lowerCamelCase : List[str] ) -> Optional[Any]:
lowerCAmelCase_ : int = testing
lowerCAmelCase_ : Union[str, Any] = testing_file
lowerCAmelCase_ : Tuple = path
def __lowercase ( self : Tuple ) -> int:
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCAmelCase_ : int = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(lowerCamelCase ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
lowerCAmelCase_ : List[Any] = (
Path(lowerCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCAmelCase_ : Dict = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCamelCase ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
lowerCAmelCase_ : Tuple = json.load(lowerCamelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCamelCase , extra_context=lowerCamelCase , )
lowerCAmelCase_ : List[str] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
lowerCAmelCase_ : Tuple = json.load(lowerCamelCase )
lowerCAmelCase_ : str = configuration["""lowercase_modelname"""]
lowerCAmelCase_ : List[str] = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(F'{directory}/configuration.json' )
lowerCAmelCase_ : Dict = """PyTorch""" in generate_tensorflow_pytorch_and_flax
lowerCAmelCase_ : Optional[int] = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
lowerCAmelCase_ : List[str] = """Flax""" in generate_tensorflow_pytorch_and_flax
lowerCAmelCase_ : Union[str, Any] = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=lowerCamelCase )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , """w""" ):
pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(lowerCamelCase : Any ):
with open(lowerCamelCase , """r""" ) as f:
lowerCAmelCase_ : List[str] = f.readlines()
with open(lowerCamelCase , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCamelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : List[str] ):
# Create temp file
lowerCAmelCase_, lowerCAmelCase_ : int = mkstemp()
lowerCAmelCase_ : List[Any] = False
with fdopen(lowerCamelCase , """w""" ) as new_file:
with open(lowerCamelCase ) as old_file:
for line in old_file:
new_file.write(lowerCamelCase )
if line_to_copy_below in line:
lowerCAmelCase_ : List[str] = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCamelCase )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(lowerCamelCase , lowerCamelCase )
# Remove original file
remove(lowerCamelCase )
# Move new file
move(lowerCamelCase , lowerCamelCase )
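        # The mkstemp/copymode/move sequence above is the standard safe
        # in-place edit: write the modified copy to a temp file, copy the
        # original file's permissions onto it, then move it over the original
        # path so readers never see a half-written file.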
def skip_units(lowerCamelCase : Optional[int] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCamelCase : Any ):
with open(lowerCamelCase ) as datafile:
lowerCAmelCase_ : Dict = []
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : str = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCAmelCase_ : Dict = line.split("""\"""" )[1]
lowerCAmelCase_ : int = skip_units(lowerCamelCase )
elif "# Below: " in line and "##" not in line:
lowerCAmelCase_ : Any = line.split("""\"""" )[1]
lowerCAmelCase_ : Tuple = skip_units(lowerCamelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Dict = []
elif "# Replace with" in line and "##" not in line:
lowerCAmelCase_ : int = []
elif "##" not in line:
lines_to_copy.append(lowerCamelCase )
remove(lowerCamelCase )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(lowerCamelCase )
| 120 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( ) -> int:
__A : Any = 10
__A : Dict = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__A : Dict = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCamelCase__ ) ),
} , features=lowerCamelCase__ , )
return dataset
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : str , __snake_case : List[str] ) -> Union[str, Any]:
__A : Dict = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase__ )
return filename
# FILE_CONTENT + files
FILE_CONTENT : str = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : Dict ) -> List[str]:
__A : Union[str, Any] = tmp_path_factory.mktemp('data' ) / '''file.txt'''
__A : int = FILE_CONTENT
with open(lowerCamelCase__ , 'w' ) as f:
f.write(lowerCamelCase__ )
return filename
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
    import bz2
    __A : int = tmp_path_factory.mktemp('data' ) / '''file.txt.bz2'''
    __A : Union[str, Any] = bytes(lowerCamelCase__ , 'utf-8' )
    with bz2.open(lowerCamelCase__ , 'wb' ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : List[str] ) -> List[Any]:
import gzip
__A : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__A : int = bytes(lowerCamelCase__ , 'utf-8' )
with gzip.open(lowerCamelCase__ , 'wb' ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : List[str] ) -> Optional[Any]:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        __A : List[str] = tmp_path_factory.mktemp('data' ) / '''file.txt.lz4'''
        __A : Optional[int] = bytes(lowerCamelCase__ , 'utf-8' )
        with lz4.frame.open(lowerCamelCase__ , 'wb' ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : str , __snake_case : Any ) -> List[str]:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        __A : int = tmp_path_factory.mktemp('data' ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(lowerCamelCase__ , 'w' ) as archive:
archive.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : int , __snake_case : str ) -> List[Any]:
import tarfile
__A : Optional[Any] = tmp_path_factory.mktemp('data' ) / '''file.txt.tar'''
with tarfile.TarFile(lowerCamelCase__ , 'w' ) as f:
f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : str ) -> str:
import lzma
__A : List[str] = tmp_path_factory.mktemp('data' ) / '''file.txt.xz'''
__A : Any = bytes(lowerCamelCase__ , 'utf-8' )
with lzma.open(lowerCamelCase__ , 'wb' ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Dict ) -> Union[str, Any]:
import zipfile
__A : Union[str, Any] = tmp_path_factory.mktemp('data' ) / '''file.txt.zip'''
with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : List[str] ) -> int:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__A : Any = tmp_path_factory.mktemp('data' ) / '''file.txt.zst'''
__A : List[str] = bytes(lowerCamelCase__ , 'utf-8' )
with zstd.open(lowerCamelCase__ , 'wb' ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : Tuple ) -> Tuple:
__A : List[Any] = tmp_path_factory.mktemp('data' ) / '''file.xml'''
__A : Optional[int] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase__ , 'w' ) as f:
f.write(lowerCamelCase__ )
return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Any:
__A : Optional[Any] = datasets.Dataset.from_dict(lowerCamelCase__ )
__A : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase__ )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : List[str] ) -> List[Any]:
__A : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowerCamelCase__ ) ) as con:
__A : str = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
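# Illustrative read-back of the fixture above (a sketch, not a fixture; `path`
# stands for the returned value):
#
#     with contextlib.closing(sqlite3.connect(path)) as con:
#         rows = con.execute('SELECT * FROM dataset').fetchall()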
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : int ) -> Optional[int]:
__A : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase__ , 'w' , newline='' ) as f:
__A : Any = csv.DictWriter(lowerCamelCase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase__ )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : List[str] ) -> Optional[int]:
__A : str = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase__ , 'w' , newline='' ) as f:
__A : str = csv.DictWriter(lowerCamelCase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase__ )
return path
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : List[Any] ) -> int:
    import bz2
    __A : List[str] = tmp_path_factory.mktemp('data' ) / '''dataset.csv.bz2'''
    with open(lowerCamelCase__ , 'rb' ) as f:
        __A : Union[str, Any] = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCamelCase__ , 'wb' ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
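# Illustrative sanity check (a sketch, not a fixture; `parquet_path` stands for
# the path a test receives from the fixture above):
#
#     table = pq.read_table(parquet_path)
#     assert table.column_names == ["col_1", "col_2", "col_3"]
#     assert table.num_rows == len(DATA)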
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
lowercase__ : Dict = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
lowercase__ : Any = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch (values in [-1, 1], NCHW) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (values in [0, 1], NHWC) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
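# Minimal usage sketch (assumes `torch` is installed; the tensor shape is
# illustrative only):
#
#     import torch
#
#     batch = torch.rand(2, 3, 64, 64) * 2 - 1   # values in [-1, 1], NCHW
#     pil_images = pt_to_pil(batch)              # -> list of two 64x64 PIL images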
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # A lock file name longer than the OS limit is truncated to a valid ".lock" name.
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates."""

    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx, left_element, right_element, a, b, val):
        """Assign val to every position in [a, b] (1-based, inclusive)."""
        # push any pending lazy value down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx, left_element, right_element, a, b):
        """Return the maximum over [a, b] (1-based, inclusive)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
lowerCAmelCase__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowerCAmelCase__ = 15
lowerCAmelCase__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
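    # Quick cross-check of the tree against a plain list (a sketch under the same
    # 1-based, inclusive [a, b] convention used above):
    reference = A.copy()
    for i in range(0, 3):
        reference[i] = 111  # mirrors segt.update(1, 1, size, 1, 3, 111)
    for i in range(6, 8):
        reference[i] = 235  # mirrors segt.update(1, 1, size, 7, 8, 235)
    assert segt.query(1, 1, size, 1, size) == max(reference)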
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        # Iterating raises ContainsLoopError as soon as a node is revisited.
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
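    # Alternative sketch: Floyd's tortoise-and-hare detects the same loops in O(1)
    # extra space (the `visited` list above is O(n)); `head` is any Node.
    def has_loop_floyd(head: Node) -> bool:
        slow = fast = head
        while fast is not None and fast.next_node is not None:
            slow = slow.next_node
            fast = fast.next_node.next_node
            if slow is fast:
                return True
        return False

    print(has_loop_floyd(root_node))  # False (matches root_node.has_loop)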
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__lowerCAmelCase = HfArgumentParser((TrainingArguments,))
__lowerCAmelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
__lowerCAmelCase = DummyDataset(dataset_length)
def snake_case_ ( snake_case ) -> Dict:
lowercase__: str = list(range(len(snake_case ) ) )
lowercase__: Tuple = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
__lowerCAmelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__lowerCAmelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCAmelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCAmelCase = 2
__lowerCAmelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCAmelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCAmelCase = None
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a "unit" matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
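    # Worked example (sketch): 2x + y = 5 and x + 2y = 4 give x = 2.0, y = 1.0.
    assert solve_simultaneous([[2, 1, 5], [1, 2, 4]]) == [2.0, 1.0]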
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model needs an explicit unk_id; patch it into the serialized tokenizer.
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
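# Minimal usage sketch (the corpus and vocab size are illustrative only, not
# part of the original module):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train_from_iterator(["hello world", "hello unigram tokenizer"], vocab_size=40)
#     print(tokenizer.encode("hello world").tokens)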
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend and excite requires being able to run a backward pass at
    # inference time. There's no deterministic backward operator for pad.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: larger batch sizes cause this test to time out, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    # Attend and excite requires being able to run a backward pass at
    # inference time. There's no deterministic backward operator for pad.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = r"\w+[.]\d+"
lowercase__ = re.findall(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for pat in pats:
lowercase__ = key.replace(SCREAMING_SNAKE_CASE_ , "_".join(pat.split("." ) ) )
return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
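# Minimal sketch of the kernel-transpose convention used above (illustrative
# shapes only, not part of the library): a PyTorch Linear weight of shape
# (out, in) becomes a Flax kernel of shape (in, out), and a Conv2d weight
# (O, I, kH, kW) becomes an HWIO kernel via transpose(2, 3, 1, 0).
#
#   import numpy as np
#   pt_linear = np.zeros((8, 4))               # (out_features, in_features)
#   flax_kernel = pt_linear.T                  # (4, 8) == (in_features, out_features)
#   pt_conv = np.zeros((16, 3, 5, 5))          # (O, I, kH, kW)
#   flax_conv = pt_conv.transpose(2, 3, 1, 0)  # (5, 5, 3, 16) == (kH, kW, I, O)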
| 224 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
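# The *ModelTester pattern used throughout transformers tests: a plain helper
# class builds a tiny random config plus inputs and runs shape checks, while
# the unittest.TestCase further below wires it into the shared mixin machinery.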
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 277 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
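# Quick sanity check of the entropy helper (illustrative values only): for a
# uniform two-way distribution, entropy(torch.tensor([[0.5, 0.5]])) equals
# -2 * 0.5 * log(0.5) = log(2) ~ 0.6931; a one-hot distribution gives 0, which
# is why the p == 0 entries of plogp are zeroed out above.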
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute the head attention entropy and head importance scores for every attention head."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
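# The importance score above follows Michel et al. (2019), "Are Sixteen Heads
# Really Better than One?" (http://arxiv.org/abs/1905.10650): with a per-head
# mask m initialized to 1, the importance of head h is the accumulated
# |dL/dm_h| over the evaluation set, i.e. how sensitive the LM loss is to
# turning that head off, normalized by the total token count.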
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) in order of increasing importance until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and measure the effect on score and speed."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
    main()
| 277 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters and
    converts each character into its Unicode codepoint, so no vocabulary file is needed.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) to an id (i.e. its integer codepoint value)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Converts a Unicode codepoint (integer) to a token (str). In case it's a special codepoint, convert to
        its human-readable format.
        """
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
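# A minimal sketch of how this character-level tokenizer behaves (assumed
# usage, not a test that ships with this file): every character maps directly
# to its Unicode codepoint.
#
#   tok = CanineTokenizer()
#   ids = tok("hello")["input_ids"]
#   # -> [0xE000, 104, 101, 108, 108, 111, 0xE001]
#   # where 0xE000/0xE001 are the [CLS]/[SEP] pseudo-characters defined above.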
| 365 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop exists when the exact same node appears more than once in a traversal."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
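# Note: the visited-list check above is O(n^2) time / O(n) space. A common
# alternative (not part of the original file) is Floyd's tortoise-and-hare
# algorithm, which detects a loop in O(n) time and O(1) space:
#
#   def has_loop_floyd(head: Node | None) -> bool:
#       slow = fast = head
#       while fast is not None and fast.next_node is not None:
#           slow = slow.next_node            # advance one step
#           fast = fast.next_node.next_node  # advance two steps
#           if slow is fast:                 # the pointers can only meet inside a cycle
#               return True
#       return False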
if __name__ == "__main__":
_A : Any =Node(1)
_A : Optional[int] =Node(2)
_A : Dict =Node(3)
_A : Optional[Any] =Node(4)
print(root_node.has_loop) # False
_A : Any =root_node.next_node
print(root_node.has_loop) # True
_A : Dict =Node(5)
_A : Union[str, Any] =Node(6)
_A : str =Node(5)
_A : int =Node(6)
print(root_node.has_loop) # False
_A : Optional[Any] =Node(1)
print(root_node.has_loop) # False
| 129 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 240 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
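# Typical release flow inferred from the flags above (command paths are
# assumptions, adapt to where this script lives in the repository):
#   python utils/release.py                 # bump to the release version everywhere
#   ... tag and publish the release ...
#   python utils/release.py --post_release  # bump back to the next .dev0 version
#   python utils/release.py --patch         # cut a patch release from a release branch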
| 142 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return from a sequence of
        states, actions and returns. Test is performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 100 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
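# atol_for_validation above is the absolute tolerance used when the ONNX
# export machinery compares the exported model's outputs against the PyTorch
# reference; 1e-4 is a common default for vision models.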
| 100 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
__lowerCAmelCase : Any = self.get_scheduler_config()
__lowerCAmelCase : Optional[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = len(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.dummy_model()
__lowerCAmelCase : List[str] = self.dummy_sample_deter
__lowerCAmelCase : str = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase : Tuple = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
__lowerCAmelCase : Any = pred_prev_sample
__lowerCAmelCase : Union[str, Any] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : List[str] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.scheduler_classes[0]
__lowerCAmelCase : str = self.get_scheduler_config(prediction_type='v_prediction' )
__lowerCAmelCase : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = len(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.dummy_model()
__lowerCAmelCase : Any = self.dummy_sample_deter
__lowerCAmelCase : int = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
__lowerCAmelCase : int = pred_prev_sample
__lowerCAmelCase : int = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.scheduler_classes[0]
__lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
__lowerCAmelCase : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
__lowerCAmelCase : Optional[int] = -1
else:
__lowerCAmelCase : Dict = timesteps[i + 1]
__lowerCAmelCase : Optional[int] = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
__lowerCAmelCase : List[Any] = self.get_scheduler_config()
__lowerCAmelCase : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
__lowerCAmelCase : List[str] = self.get_scheduler_config()
__lowerCAmelCase : Optional[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [1_00, 87, 50, 1, 0]
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__lowerCAmelCase : Any = self.get_scheduler_config()
__lowerCAmelCase : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _SCREAMING_SNAKE_CASE , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
            scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
| 86 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( orig_dict , hf_model , model_name ):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*' , layer_index )
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
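# A hypothetical command-line invocation of the converter above (the script
# filename and output path are placeholders, not from the source; the checkpoint
# filename matches the download comment near the top of this file):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted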
| 4 | 0 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9  # units = N * m^2 * C^-2
def coulombs_law( force , charge1 , charge2 , distance ):
    charge_product = abs(charge1 * charge2 )
    if (force, charge1, charge2, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if distance < 0:
        raise ValueError('Distance cannot be negative' )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
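# Worked example (values are illustrative, not from the source): with charges of
# 3 C and 5 C at a distance of 2000 m, solving for the force gives
# k * |3 * 5| / 2000**2 = 8.988e9 * 15 / 4e6 = 33705.0 N:
#
#   coulombs_law(force=0, charge1=3, charge2=5, distance=2000)  # -> {'force': 33705.0}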
| 194 |
def is_sum_subset( arr , required_sum ):
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
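# Quick sanity check (assumed inputs, not from the source): a target of 9 is
# reachable from [3, 34, 4, 12, 5, 2] via 4 + 5, while 30 is not (34 overshoots
# and the remaining elements sum to at most 26):
#
#   assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
#   assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False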
| 194 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case = logging.get_logger(__name__)
class CLIPFeatureExtractor ( CLIPImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 294 |
"""simple docstring"""
def hubble_parameter( hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("""All input parameters must be positive""" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("""Relative densities cannot be greater than one""" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
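    # Sanity note: with the inputs above, curvature = 1 - (0.3 + 1e-4 + 0.7)
    # = -1e-4, so at redshift 0 the bracketed sum is exactly 1 and the printed
    # value equals the Hubble constant itself, 68.3.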
| 294 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    return (
        num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
    )
def fraction_list(digit_len: int ) -> list[str]:
    solutions = []
    den = 1_1
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 9_9:
            if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        den = 1_0
    return solutions
def solution(n: int = 2 ) -> int:
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
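# Note (Project Euler 33): the four non-trivial digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product in lowest terms is 1/100, so
# solution() prints 100.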
| 362 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job ):
    job_info = {}
    start = job['started_at']
    end = job['completed_at']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
def get_job_time(workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job['name']: extract_time_from_single_job(job ) for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" , headers=headers ).json()
            job_time.update({job['name']: extract_time_from_single_job(job ) for job in result['jobs']} )
        return job_time
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 149 | 0 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 89 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess( image ) ->Any:
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('RGB' ) ) for img in image]
    image = torch.stack(image )
    return image
class __snake_case ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ) -> None:
        '''simple docstring'''
        if strength < 0 or strength > 1:
            raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}' )
    def get_timesteps( self , num_inference_steps , strength , device ):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        '''simple docstring'''
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}' )
        image = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        init_latents = image
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('add noise to latents at timestep' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    @torch.no_grad()
    def __call__( self , image = None , strength = 0.8 , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        # 1. Check inputs
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
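# Minimal usage sketch (checkpoint handling and file names are assumptions, not
# from the source); `strength` in [0, 1] controls how much noise is added to the
# input image before denoising:
#
#   import PIL.Image
#   pipe = __snake_case(unet=unet, scheduler=scheduler)  # the pipeline class above
#   result = pipe(PIL.Image.open("input.png"), strength=0.75, num_inference_steps=50)
#   result.images[0].save("output.png")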
| 290 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase__ : Any =None
UpperCAmelCase__ : List[str] =logging.get_logger(__name__)
UpperCAmelCase__ : Tuple ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase__ : Dict ={
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase__ : Optional[int] ={
'''facebook/nllb-large-en-ro''': 10_24,
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
UpperCAmelCase__ : Dict =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __A ( PreTrainedTokenizerFast ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = ["""input_ids""", """attention_mask"""]
__A = NllbTokenizer
__A = []
__A = []
def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=False , **UpperCAmelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
lowerCamelCase =legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase =vocab_file
lowerCamelCase =False if not self.vocab_file else True
lowerCamelCase =FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
lowerCamelCase ={
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase =src_lang if src_lang is not None else """eng_Latn"""
lowerCamelCase =self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _snake_case ( self ):
return self._src_lang
@src_lang.setter
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCamelCase =[self.sep_token_id]
lowerCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
lowerCamelCase =src_lang
lowerCamelCase =self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase =self.convert_tokens_to_ids(UpperCAmelCase_ )
lowerCamelCase =tgt_lang_id
return inputs
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = "eng_Latn" , UpperCAmelCase_ = None , UpperCAmelCase_ = "fra_Latn" , **UpperCAmelCase_ , ):
lowerCamelCase =src_lang
lowerCamelCase =tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def _snake_case ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
lowerCamelCase =[]
lowerCamelCase =[self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase =[self.cur_lang_code]
lowerCamelCase =[self.eos_token_id]
lowerCamelCase =self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase =self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase =processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
lowerCamelCase =[]
lowerCamelCase =[self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase =[self.cur_lang_code]
lowerCamelCase =[self.eos_token_id]
lowerCamelCase =self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase =self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase =processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCamelCase =os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
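# Hedged usage sketch (the checkpoint id comes from the URL maps above; the
# behaviour assumes the standard fast-tokenizer API of the base class):
#
#   tok = __A.from_pretrained("facebook/nllb-200-distilled-600M",
#                             src_lang="eng_Latn", tgt_lang="fra_Latn")
#   batch = tok("Hello world", return_tensors="pt")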
| 262 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
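# Usage sketch (illustrative values): a second-order low-pass at 1 kHz for
# 48 kHz audio, applied sample by sample via IIRFilter.process (an assumed API
# of audio_filters.iir_filter):
#
#   filt = make_lowpass(1000, 48000)
#   out = [filt.process(sample) for sample in samples]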
| 262 | 1 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl , wt , w , n ):
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
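# Worked example (assumed values, not from the source): items worth
# [60, 100, 120] with weights [10, 20, 30] and capacity 50 take the first two
# whole plus 20/30 of the third: 60 + 100 + (20 / 30) * 120 = 240.0.
#
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 240.0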
| 144 |
def base16_encode( data: bytes ):
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str ):
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] ,16 ) for i in range(0 ,len(data ) ,2 ) )
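# Round-trip example (assumed input): "Hello" encodes byte by byte to uppercase
# hex and decodes back unchanged:
#
#   base16_encode(b"Hello")      # -> '48656C6C6F'
#   base16_decode("48656C6C6F")  # -> b'Hello'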
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 312 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story( story_id: str ) -> dict:
    """simple docstring"""
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url ).json()
def hackernews_top_stories( max_stories: int = 10 ) -> list[dict]:
    """simple docstring"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown( max_stories: int = 10 ) -> str:
    """simple docstring"""
    stories = hackernews_top_stories(max_stories )
    return "\n".join("* [{title}]({url})".format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
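# The markdown printed above has one bullet per story (title and url are
# illustrative, not real output):
#
#   * [Some story title](https://example.com/story)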
| 71 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode: bool , use_xla: bool ):
    """simple docstring"""
    def run_func( func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids( batch_size: int , sequence_length: int , vocab_size: int ) -> ["tf.Tensor"]:
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class TensorFlowBenchmark ( Benchmark ):
    """simple docstring"""
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return tf.__version__
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
a = self._prepare_inference_func(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self._measure_speed(_inference )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
a = self._prepare_train_func(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self._measure_speed(_train )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase_ )
a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
a = self._prepare_inference_func(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self._measure_memory(_inference )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase_ )
a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
a = self._prepare_train_func(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self._measure_memory(_train )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
a = (
hasattr(lowerCamelCase_ , "architectures" )
and isinstance(config.architectures , lowerCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
a = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
a = __import__("transformers" , fromlist=[model_class] )
a = getattr(lowerCamelCase_ , lowerCamelCase_ )
a = model_cls(lowerCamelCase_ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
a = TF_MODEL_MAPPING[config.__class__](lowerCamelCase_ )
# encoder-decoder has vocab size saved differently
a = config.vocab_size if hasattr(lowerCamelCase_ , "vocab_size" ) else config.encoder.vocab_size
a = random_input_ids(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , training=lowerCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase_ , training=lowerCamelCase_ )
a = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
a = (
hasattr(lowerCamelCase_ , "architectures" )
and isinstance(config.architectures , lowerCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
a = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
a = __import__("transformers" , fromlist=[model_class] )
a = getattr(lowerCamelCase_ , lowerCamelCase_ )
a = model_cls(lowerCamelCase_ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
a = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase_ )
# encoder-decoder has vocab size saved differently
a = config.vocab_size if hasattr(lowerCamelCase_ , "vocab_size" ) else config.encoder.vocab_size
a = random_input_ids(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
a = model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , labels=lowerCamelCase_ , training=lowerCamelCase_ )[0]
a = tf.gradients(lowerCamelCase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
a = model(lowerCamelCase_ , labels=lowerCamelCase_ , training=lowerCamelCase_ )[0]
a = tf.gradients(lowerCamelCase_ , model.trainable_variables )
return gradients
a = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(lowerCamelCase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
a = timeit.repeat(
lowerCamelCase_ , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
a = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
a = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
a = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
a = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase_ )
a = meminfo.used
a = Memory(lowerCamelCase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
a = None
else:
a = measure_peak_memory_cpu(lowerCamelCase_ )
a = Memory(lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
a = stop_memory_tracing(lowerCamelCase_ )
if memory is None:
a = summary.total
else:
a = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 71 | 1 |
"""simple docstring"""
from math import factorial
def solution(num: int = 100 ):
    return sum(int(x ) for x in str(factorial(num ) ) )
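# Example: factorial(10) = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so
# solution(10) returns 27.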
if __name__ == "__main__":
    print(solution(int(input("""Enter the Number: """).strip())))
| 96 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )

    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
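# Illustrative usage sketch (added; the checkpoint name is an assumption):
#
#   from PIL import Image
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")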
| 368 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    def test_transform_and_reverse( self ):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        inp = tokenizer("""This is me""" , return_tensors="""pt""" )
        model = model.to_bettertransformer()
        self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output , output_from_pretrained ) )

    def test_error_save_pretrained( self ):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
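# Illustrative sketch (added) of the round trip exercised by the tests above:
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()        # swap in fast attention kernels
#   model = model.reverse_bettertransformer()   # back to a saveable vanilla model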
| 43 | 0 |
"""simple docstring"""
from math import pi
def arc_length (angle : int , radius : int ) -> float:
    """simple docstring"""
    return 2 * pi * radius * (angle / 3_60)
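# Illustrative check (added): a 90 degree arc of a circle with radius 10 has
# length 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.708.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9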
if __name__ == "__main__":
print(arc_length(90, 10))
| 64 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 0 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}

def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]

class InstagramUser:
    def __init__(self, username) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 370 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(worflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
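# Illustrative invocation (added; the script path and run id are assumptions):
#
#   python utils/get_ci_error_statistics.py --workflow_run_id 123456789 \
#       --output_dir ci_errors --token $GITHUB_TOKEN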
| 343 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
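# Illustrative check (added): stripping a remote protocol prefix from a URI.
assert extract_path_from_uri("s3://my-bucket/data/train.csv") == "my-bucket/data/train.csv"
assert extract_path_from_uri("relative/path.csv") == "relative/path.csv"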
| 113 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'speech_to_text'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=10000 , encoder_layers=12 , encoder_ffn_dim=2048 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6000 , max_target_positions=1024 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1024 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`.")
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
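# Illustrative usage (added): the defaults above match the small
# `speech_to_text` architecture, so a config can be built directly, e.g.
#
#   config = Speech2TextConfig(encoder_layers=6, decoder_layers=3)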
| 356 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    """simple docstring"""
    def __init__( self , initial_learning_rate: float , decay_schedule_fn: Callable , warmup_steps: int , power: float = 1.0 , name: str = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__( self , step ):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power)
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps) , name=name , )

    def get_config( self ):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer( init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.999 , adam_epsilon = 1e-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay( Adam ):
    """simple docstring"""
    def __init__( self , learning_rate = 0.001 , beta_1 = 0.9 , beta_2 = 0.999 , epsilon = 1e-7 , amsgrad = False , weight_decay_rate = 0.0 , include_in_weight_decay = None , exclude_from_weight_decay = None , name = "AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config( cls , config ):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay , cls).from_config(config , custom_objects=custom_objects)

    def _prepare_local( self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self)._prepare_local(var_device , var_dtype , apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate , name="adam_weight_decay_rate")

    def _decay_weights_op( self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
        return tf.no_op()

    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        grads , tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay , self).apply_gradients(zip(grads , tvars) , name=name , **kwargs)

    def _get_lr( self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense( self , grad , var , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state)
        decay = self._decay_weights_op(var , lr_t , apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay , self)._resource_apply_dense(grad , var , **kwargs)

    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state)
        decay = self._decay_weights_op(var , lr_t , apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay , self)._resource_apply_sparse(grad , var , indices , **kwargs)

    def get_config( self ):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay( self , param_name ):
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """simple docstring"""
    def __init__( self ):
        self._gradients = []
        self._accum_steps = None

    @property
    def step( self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()

    @property
    def gradients( self ):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__( self , gradients ):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients , gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset( self ):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
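# Illustrative usage (added): a 10k-step schedule with 500 warmup steps and
# decoupled weight decay.
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
#   )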
| 127 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    """simple docstring"""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    """simple docstring"""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    """simple docstring"""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """simple docstring"""
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f'''Pushing converted {model_name} to the hub...''')
        model_name = f'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
_lowercase: List[str] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
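# Illustrative invocation (added; the script file name is an assumption):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model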
| 227 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """simple docstring"""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 227 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
) | 270 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n):
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit=1_0000):
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
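# Illustrative check (added): 220 and 284 form the smallest amicable pair,
# since sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220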
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 270 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        """simple docstring"""
        super().setUp()
        vocab = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )

    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = '''adapt act apte'''
        output_text = '''adapt act apte'''
        return input_text, output_text

    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt act apte'''
        bpe_tokens = ['''adapt''', '''act''', '''ap@@''', '''te''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )

    def test_special_tokens_small_tok( self ):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        assert tok('''sam''' ).input_ids == [1384]
        src_text = '''I am a small frog.'''
        encoded = tok([src_text] , padding=False , truncation=True )['''input_ids''']
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok( self ):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        src_text = '''I am a small frog .'''
        src_text_dot = '''.'''
        encoded = tok(src_text )['''input_ids''']
        encoded_dot = tok(src_text_dot )['''input_ids''']
        assert encoded[-1] == encoded_dot[0]
| 72 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32

    @property
    def time_input_dim( self ):
        return 32

    @property
    def block_out_channels_0( self ):
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        return 100
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
A : Any = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A : List[str] = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model

    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="""linear""", beta_start=0.0_0085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="""epsilon""", thresholding=False, )

        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self, device, seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            device )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed ) ).to(device )

        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_controlnet( self ):
        device = """cpu"""

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )

        pipe.set_progress_bar_config(disable=None )

        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )

        hint = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2, 0, 1 ).unsqueeze(0 )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )

        prompt = """A robot, 4k photo"""

        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="""""", ).to_tuple()

        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, output_type="""np""", )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image )
| 116 | 0 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved PNDM (iPNDM) multistep scheduler."""
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # linear multistep coefficients (Adams-Bashforth style), falling back to lower orders at the start
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__(self):
        return self.config.num_train_timesteps
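# --- illustrative usage (not part of the original module) ---
# The intended calling pattern, shown as comments because this module uses relative
# imports and cannot run standalone. The shapes, the 50-step count and
# `denoising_model` are assumptions for the example.
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 8, 8)                 # noisy latent to be denoised
#     for t in scheduler.timesteps:
#         model_output = denoising_model(sample, t)    # hypothetical model call
#         sample = scheduler.step(model_output, t, sample).prev_sample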
| 77 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f'Received an invalid text input, got - {type(prompt)} - but expected a single string. '
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f'Model type {model_type} does not support conditional text generation')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
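# --- illustrative usage (not part of the original module) ---
# This class is normally reached through the pipeline factory; the checkpoint name
# below is just an example of a public image-captioning model, not something this
# module prescribes.
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("path/or/url/to/image.png")
#     # -> [{'generated_text': '...'}]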
| 77 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)
        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
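# --- illustrative usage (not part of the original module) ---
# The decorator factory above either leaves a callable eager or wraps it in a traced
# tf.function (optionally XLA-compiled). The toy function below is an assumption for
# the example.
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=False)
#     def toy():
#         return tf.reduce_sum(tf.ones((2, 2)))
#
#     toy()  # runs as a compiled graph function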
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used." )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line." )
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU." )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU." )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow." )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
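# --- illustrative note (not part of the original module) ---
# `_measure_speed` takes min(timeit.repeat(...)) / number because repeated timings
# are only ever inflated by noise (GC, other processes), so the minimum is the best
# estimate of the intrinsic cost. A standalone sketch with a toy workload (an
# assumption for the example):
#
#     import timeit
#     runtimes = timeit.repeat(lambda: sum(range(10_000)), repeat=5, number=10)
#     per_call_seconds = min(runtimes) / 10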
| 142 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
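# --- illustrative note (not part of the original module) ---
# This is the standard deprecation-shim pattern: keep the old name importable, warn
# once, and inherit all behavior from the replacement class. A generic sketch (class
# names are assumptions):
#
#     class OldProcessor(NewProcessor):
#         def __init__(self, *args, **kwargs):
#             warnings.warn("OldProcessor is deprecated; use NewProcessor.", FutureWarning)
#             super().__init__(*args, **kwargs)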
| 142 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    # strip the protocol prefix ("s3://", "hf://", ...) if present
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs) -> bool:
    # anything other than the local "file" protocol counts as remote
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    # Clear fsspec's event-loop/thread references so it can be used again after forking.
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
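# --- illustrative usage (not part of the original module) ---
# Quick sketch of the helpers above, using fsspec's built-in in-memory filesystem:
#
#     import fsspec
#     extract_path_from_uri("s3://bucket/dataset")       # -> "bucket/dataset"
#     is_remote_filesystem(fsspec.filesystem("memory"))  # -> True (protocol != "file")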
| 364 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney", )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
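    # --- illustrative check (not part of the original class) ---
    # What zero-mean/unit-variance normalization does, in isolation; the toy array
    # below is an assumption for the example.
    #
    #     x = np.array([1.0, 2.0, 3.0, 4.0])
    #     normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
    #     # normed.mean() ~ 0.0 and normed.std() ~ 1.0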
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        # extracts log-mel filterbank features for one (unbatched) waveform
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", )
        return log_mel_spec.T
    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio(
        self,
        speech,
        is_target: bool = False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 288 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 216 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '▁eloquent')
        self.assertEqual(len(vocab_keys), 3_0000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁this', '▁is', '▁a', '▁test'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode('sequence builders')
        text_2 = tokenizer.encode('multi-sequence build')
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name='albert-base-v2', revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e', )
| 39 | 0 |
'''simple docstring'''
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
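# --- illustrative usage (not part of the original module) ---
# The conversion pivots through km/h: `speed * speed_chart[unit_from]` yields km/h,
# and `* speed_chart_inverse[unit_to]` converts km/h into the target unit.
#
#     convert_speed(100, "km/h", "m/s")  # -> 27.778
#     convert_speed(100, "mph", "km/h")  # -> 160.934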
| 236 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
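# --- illustrative subclass (not part of the original module) ---
# Sketch of how a concrete command would plug into this interface; the "env" command
# name and its behavior are assumptions for the example.
#
#     class EnvCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             env_parser = parser.add_parser("env")
#             env_parser.set_defaults(func=lambda args: EnvCommand())
#
#         def run(self):
#             print("environment info would go here")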
| 236 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split('_')
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'swin.' + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self'
            if "weight" in key:
                orig_state_dict[f'{prefix}.query.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'{prefix}.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[f'{prefix}.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'{prefix}.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
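# --- illustrative check (not part of the original script) ---
# The qkv branch above slices one fused (3*dim, dim) projection into query/key/value.
# A tiny standalone sketch with a toy size (dim=2 is an assumption):
#
#     import torch
#     dim = 2
#     fused = torch.arange(3 * dim * dim).reshape(3 * dim, dim)
#     q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
#     assert torch.equal(torch.cat([q, k, v]), fused)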
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_', '-')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')
    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)
    print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 96 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f'''can't find {path}''')
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
            '''.split()
        with patch.object(sys, 'argv', testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn
        testargs = '\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    '.split()
        with patch.object(sys, 'argv', testargs):
            xla_spawn.main()
| 96 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 18 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
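# Note (explanatory, not in the original script): the torchvision transforms
# defined below produce one (C, H, W) tensor per example, so this custom
# collator is what torch.stack-s them into the (B, C, H, W) batch the model
# expects instead of relying on the default collation.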
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A_ = image_processor.size['''shortest_edge''']
else:
A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
A_ = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
A_ = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
A_ = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCamelCase : Dict ):
A_ = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(_UpperCamelCase : Any ):
A_ = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 18 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
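# Note on the qkv split above (explanatory, not in the original file; the HF
# target key names are reconstructed assumptions): timm-style checkpoints
# store the attention projections as one fused matrix of shape
# (3 * hidden_size, hidden_size); slicing rows [:dim], [dim : 2 * dim] and
# [-dim:] recovers the separate query, key and value weights.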
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
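    # Note (explanatory, not in the original file): the mean/std passed to the
    # feature extractor above are dataset-level log-mel normalization statistics
    # (AudioSet vs. Speech Commands), so they have to match the checkpoint
    # being converted or the verification logits below will not line up.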
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCAmelCase : Optional[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCAmelCase : Tuple = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCAmelCase : Tuple = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCAmelCase : Union[str, Any] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCAmelCase : Any = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCAmelCase : Any = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCAmelCase : List[Any] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCAmelCase : Union[str, Any] = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 265 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, text_embeddings=None, **kwargs):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
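                # Explanatory note (not in the original file): with w = guidance_scale this is
                # eps_hat = eps_uncond + w * (eps_text - eps_uncond); w = 1 reduces to the plain
                # conditional prediction, larger w pushes samples harder toward the prompt.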
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype) )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 265 | 1 |
def pancake_sort(arr):
    """Sort a list in place-style by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
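
# Minimal usage sketch (illustrative, not part of the original module):
#
#     pancake_sort([3, 1, 2])  # [1, 2, 3]
#
# Each pass flips the current maximum to the front of the unsorted prefix,
# then flips that whole prefix so the maximum lands in its final slot:
# [3, 1, 2] -> [2, 1, 3] -> [1, 2, 3].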
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 368 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
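
# Minimal usage sketch (illustrative, not part of the original module):
#
#     config = RoFormerConfig(rotary_value=True)
#     onnx_config = RoFormerOnnxConfig(config)
#     print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']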
| 59 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
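
# Minimal usage sketch (illustrative, not part of the original module):
#
#     config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)
#     print(config.model_type)  # "mobilenet_v2"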
| 22 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark this cell as part of the tentative path
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
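
# Minimal usage sketch (illustrative; assumes 0 marks an open cell and 1 a wall):
#
#     solve_maze([[0, 1], [0, 0]])
#     # prints the marked path:
#     # [1, 0]
#     # [1, 1]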
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """
    Computes the Hubble parameter from the Friedmann equation:
    H(z) = H0 * sqrt(Omega_r * (1+z)^4 + Omega_m * (1+z)^3 + Omega_k * (1+z)^2 + Omega_Lambda)
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
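
# Quick sanity check (illustrative, not part of the original module): at
# redshift z = 0 the density terms sum to 1 by construction (curvature is
# defined as 1 minus the other densities), so the function returns
# hubble_constant itself up to floating-point rounding:
#
#     hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
#                      matter_density=0.3, dark_energy=0.7, redshift=0)
#     # ~= 68.3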
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
    # demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 299 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self):
        return str(self.root)
return str(self.root )
    def __reassign_nodes(self, node: Node, new_children: Node | None):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root

        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None

        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse(self, node: Node | None):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None):
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
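
# Quick illustration (explanatory, not in the original file): for the tree
# built from (8, 3, 6, 1, 10, 14, 13, 4, 7), the inorder traversal yields the
# sorted sequence [1, 3, 4, 6, 7, 8, 10, 13, 14], so find_kth_smallest(3, root)
# returns 4.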
def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 299 | 1 |
import math
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
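
# Why stepping by 6 suffices (explanatory note, not in the original file):
# any integer is 6k + r with r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} gives an
# even number and r == 3 a multiple of 3, so a prime above 3 must have
# r == 1 or r == 5 -- exactly the two candidates i and i + 2 tested above.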
def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral (Project Euler problem 58) at
    which the ratio of primes along both diagonals first falls below `ratio`.
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
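
# Corner arithmetic sketch (explanatory, not in the original file): for a
# spiral layer of side j, the bottom-right corner holds j * j, and the other
# three corners of the next layer (side j + 2) are j*j + (j + 1),
# j*j + 2*(j + 1) and j*j + 3*(j + 1) -- exactly
# range(j * j + j + 1, (j + 2) * (j + 2), j + 1) as iterated above.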
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
"""simple docstring"""
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node to the end of the list (before rear)"""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node from the list"""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
"""simple docstring"""
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self , _snake_case ):
"""simple docstring"""
return key in self.cache
    def get(self, key: T) -> U | None:
        """Returns the value for the input key and updates the Double Linked List. Returns None if key is not present in cache"""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for the input key and updates the Double Linked List"""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(cls, size=128):
        """simple docstring"""

        def cache_decorator_inner(func) -> Callable[..., U]:
            def cache_decorator_wrapper(*args) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
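
# Minimal usage sketch (hypothetical `fib`): `LRUCache.decorator` keys the shared
# class-level map by the function object, so every decorated function gets its own
# LRUCache instance plus a `cache_info()` accessor attached to the wrapper.
#
#     @LRUCache.decorator(100)
#     def fib(num: int) -> int:
#         return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#     fib(30)
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)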
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__snake_case :Any = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        '''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 131 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
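
# e.g. to_2tuple(224) -> (224, 224), while an iterable such as (224, 160) is returned unchanged.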
@require_tf
class TFVisionTextDualEncoderMixin:
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
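        # worked example: a 224x224 image with 16x16 patches yields (224 // 16) ** 2 = 196
        # patches, so seq_len = 197 once the [CLS] token is prepended.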
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        '''simple docstring'''
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        '''simple docstring'''
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        '''simple docstring'''
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        '''simple docstring'''
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        '''simple docstring'''
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        '''simple docstring'''
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)
@slow
    def test_real_model_save_load_from_pretrained(self):
        '''simple docstring'''
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        '''simple docstring'''
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model(self, vision_config, text_config):
        '''simple docstring'''
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        '''simple docstring'''
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
    def test_inference(self):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 131 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    '''simple docstring'''
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    '''simple docstring'''
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        '''simple docstring'''
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    '''simple docstring'''
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}""")
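
# Minimal usage sketch: resolve an activation by name and apply it to a tensor.
#
#     act = get_tf_activation("gelu_new")
#     y = act(tf.constant([-1.0, 0.0, 1.0]))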
| 77 | """simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
'''simple docstring'''
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
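
# The function implements v_rms = sqrt(3 * R * T / M) with M in kg/mol: for nitrogen,
# T = 300 K and M = 0.028 kg/mol give sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s.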
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 77 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
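
# e.g. height=512 with scale_factor=8: 512 // 64 = 8 gives a latent height of 8 * 8 = 64;
# a non-multiple such as 520 rounds up to (8 + 1) * 8 = 72, keeping the latent grid
# divisible by the movq scale factor.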
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_snake_case = torch.device(f'''cuda:{gpu_id}''' )
_snake_case = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCamelCase , _lowerCamelCase )
    def enable_model_cpu_offload(self, gpu_id=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_snake_case = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=_lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_snake_case = None
for cpu_offloaded_model in [self.unet, self.movq]:
_snake_case , _snake_case = cpu_offload_with_hook(_lowerCamelCase , _lowerCamelCase , prev_module_hook=_lowerCamelCase )
# We'll offload the last model manually.
_snake_case = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCamelCase )
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
_snake_case = self._execution_device
_snake_case = guidance_scale > 1.0
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_snake_case = torch.cat(_lowerCamelCase , dim=0 )
_snake_case = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_snake_case = torch.cat(_lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
_snake_case = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 )
_snake_case = negative_image_embeds.repeat_interleave(_lowerCamelCase , dim=0 )
_snake_case = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCamelCase )
self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase )
_snake_case = self.scheduler.timesteps
_snake_case = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
# create initial latent
_snake_case = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
_snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_snake_case = {'''image_embeds''': image_embeds}
_snake_case = self.unet(
sample=_lowerCamelCase , timestep=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , added_cond_kwargs=_lowerCamelCase , return_dict=_lowerCamelCase , )[0]
if do_classifier_free_guidance:
_snake_case , _snake_case = noise_pred.split(latents.shape[1] , dim=1 )
_snake_case , _snake_case = noise_pred.chunk(2 )
_snake_case , _snake_case = variance_pred.chunk(2 )
_snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_snake_case = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_snake_case , _snake_case = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_snake_case = self.scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase , )[0]
# post-processing
_snake_case = self.movq.decode(_lowerCamelCase , force_not_quantize=_lowerCamelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_snake_case = image * 0.5 + 0.5
_snake_case = image.clamp(0 , 1 )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_snake_case = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase )
| 40 |
"""simple docstring"""
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
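
# worked example: for "level" (n = 5) the generator compares the pairs (s[0], s[4])
# and (s[1], s[3]); the middle character s[2] needs no check.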
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 40 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
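
# e.g. for a shard file named like "layer_04-model_00-model_states.pt" (hypothetical),
# layer_name_mapping("input_layernorm.weight", file) returns "h.1.input_layernorm.weight":
# transformer-block files start at layer_3, hence the -3 offset.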
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
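
# e.g. str(torch.float16) is "torch.float16", the regex captures "16" and the function
# returns 2 bytes per element; torch.bool is special-cased as 1/8 of a byte.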
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
if bloom_config_file == "":
a = BloomConfig()
else:
a = BloomConfig.from_json_file(__lowerCamelCase )
if shard_model:
a = os.listdir(__lowerCamelCase )
a = sorted(filter(lambda __lowerCamelCase : s.startswith("""layer""" ) and "model_00" in s , __lowerCamelCase ) )
a = {"""weight_map""": {}, """metadata""": {}}
a = 0
a = None
a = BloomConfig()
for j, file in enumerate(__lowerCamelCase ):
print("""Processing file: {}""".format(__lowerCamelCase ) )
a = None
for i in range(__lowerCamelCase ):
# load all TP files
a = file.replace("""model_00""" , f'model_0{i}' )
a = torch.load(os.path.join(__lowerCamelCase , __lowerCamelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
a = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
if tensors is None:
a = temp
else:
for key in tensors.keys():
if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
a = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
a = torch.cat([tensors[key], temp[key]] , dim=__lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
a = tensors[key] / pretraining_tp
torch.save(
__lowerCamelCase , os.path.join(
__lowerCamelCase , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(__lowerCamelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
a = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
a = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(__lowerCamelCase ) ).zfill(5 ) )
a = BloomConfig()
a = pytorch_dump_folder_path + """/""" + CONFIG_NAME
a = total_size
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__lowerCamelCase , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n"""
f.write(__lowerCamelCase )
else:
a = BloomModel(__lowerCamelCase )
a = os.listdir(__lowerCamelCase )
a = sorted(filter(lambda __lowerCamelCase : s.startswith("""layer""" ) and "model_00" in s , __lowerCamelCase ) )
a = None
for i, file in enumerate(__lowerCamelCase ):
a = None
for i in range(__lowerCamelCase ):
# load all TP files
a = file.replace("""model_00""" , f'model_0{i}' )
a = torch.load(os.path.join(__lowerCamelCase , __lowerCamelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
a = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
if tensors is None:
a = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
a = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
a = torch.cat([tensors[key], temp[key]] , dim=__lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
a = tensors[key] / pretraining_tp
a = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
a = set(other_keys.missing_keys )
else:
a = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
a = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
a = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
a = model.to(config.torch_dtype )
torch.save(model.state_dict() , __lowerCamelCase )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 228 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
a = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
a = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
a = tokenizer_s.tokenize(__magic_name__ )
a = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a = """xa\u0303y""" + """ """ + """x\xe3y"""
a = tokenizer_s.tokenize(__magic_name__ )
a = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on unicode of space type
a = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a = tokenizer_s.tokenize(__magic_name__ )
a = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on unicode of line break type
a = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a = tokenizer_s.tokenize(__magic_name__ )
a = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
a = F'{text_of_1_token} {text_of_1_token}'
a = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , )
a = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
                text = f" {text}"
a = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , )
a = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ) + 1, 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
with self.assertRaises(__magic_name__ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
    def test_tokenization_python_rust_equals(self):
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
| 228 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
'''simple docstring'''
lowercase_ = XGLMConfig
lowercase_ = {}
lowercase_ = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config(self):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCAmelCase_ , )
    def prepare_config_and_inputs_for_common(self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
lowercase_ = False
lowercase_ = False
lowercase_ = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[Any]:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: str =TFXGLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
'''simple docstring'''
lowerCamelCase__: Any =TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
lowerCamelCase__: str =tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowerCamelCase__: Union[str, Any] =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
lowerCamelCase__: int =model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ , num_beams=1)
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =XGLMTokenizer.from_pretrained("facebook/xglm-564M")
lowerCamelCase__: str =TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
tf.random.set_seed(0)
lowerCamelCase__: Dict =tokenizer("Today is a nice day and" , return_tensors="tf")
lowerCamelCase__: List[str] =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0"):
lowerCamelCase__: Any =model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ , seed=[7, 0])
lowerCamelCase__: Dict =tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =(
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
lowerCamelCase__: Dict =XGLMTokenizer.from_pretrained("facebook/xglm-564M")
lowerCamelCase__: Union[str, Any] ="left"
# use different length sentences to test batching
lowerCamelCase__: int =[
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
lowerCamelCase__: str =tokenizer(UpperCAmelCase_ , return_tensors="tf" , padding=UpperCAmelCase_)
lowerCamelCase__: Any =inputs["input_ids"]
lowerCamelCase__: Union[str, Any] =model.generate(input_ids=UpperCAmelCase_ , attention_mask=inputs["attention_mask"] , max_new_tokens=12)
lowerCamelCase__: Dict =tokenizer(sentences[0] , return_tensors="tf").input_ids
lowerCamelCase__: str =model.generate(input_ids=UpperCAmelCase_ , max_new_tokens=12)
lowerCamelCase__: Tuple =tokenizer(sentences[1] , return_tensors="tf").input_ids
lowerCamelCase__: List[str] =model.generate(input_ids=UpperCAmelCase_ , max_new_tokens=12)
lowerCamelCase__: Optional[int] =tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_)
lowerCamelCase__: int =tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase_)
lowerCamelCase__: int =tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase_)
lowerCamelCase__: List[Any] =[
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , [non_padded_sentence, padded_sentence])
| 273 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["torch", "torchsde"])
| 273 | 1 |
'''simple docstring'''
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
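
# worked example: binary_xor(25, 32) -> 25 is 0b11001 and 32 is 0b100000; zero-padded to
# six digits, 011001 XOR 100000 = 111001, so the function returns "0b111001" (decimal 57).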
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174 |
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 6 | 0 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string: str) -> bool:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 361 |
"""Utility to shrink an exported ONNX model by removing duplicate initializers."""

import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b  # compare the tensor protos with their names masked out

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from the model to reduce its size. Writes a new file
    named ``optimized_<original name>`` next to the input and returns its path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
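# Minimal usage sketch (the file name is hypothetical; any exported ONNX model works):
#
#   optimized_path = remove_dup_initializers("bart_for_summarization.onnx")
#   print("optimized model written to", optimized_path)
#
# Deduplication only removes initializers whose tensor protos compare equal once their
# names are masked out, so the optimized graph is numerically identical for inference.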
| 13 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 158 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 106 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 357 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
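# Minimal usage sketch (library defaults only, no pretrained weights):
#
#   config = MaskFormerSwinConfig()
#   assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1)  # 768 for the defaults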
| 289 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
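# Minimal usage sketch (exercises the default ResNet backbone path):
#
#   config = UperNetConfig()
#   assert config.to_dict()["backbone_config"]["model_type"] == "resnet"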
| 255 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 255 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def UpperCAmelCase__ (snake_case__ : bytes ):
"""simple docstring"""
if len(snake_case__ ) != 32:
raise ValueError("""Input must be of length 32""" )
_snake_case : Optional[int] = B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
_snake_case : Optional[Any] = format(snake_case__ , """08x""" )[-8:]
_snake_case : Optional[Any] = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def UpperCAmelCase__ (snake_case__ : bytes ):
"""simple docstring"""
_snake_case : Union[str, Any] = B""""""
for char in message:
bit_string += format(snake_case__ , """08b""" ).encode("""utf-8""" )
_snake_case : List[Any] = format(len(snake_case__ ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(snake_case__ ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def UpperCAmelCase__ (snake_case__ : bytes ):
"""simple docstring"""
if len(snake_case__ ) % 5_12 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0 , len(snake_case__ ) , 5_12 ):
_snake_case : List[str] = bit_string[pos : pos + 5_12]
_snake_case : List[str] = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
_snake_case : Optional[int] = format(snake_case__ , """032b""" )
_snake_case : str = """"""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(snake_case__ , 2 )
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
return (a + b) % 2**32
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def UpperCAmelCase__ (snake_case__ : bytes ):
"""simple docstring"""
_snake_case : Any = preprocess(snake_case__ )
_snake_case : Optional[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_snake_case : Union[str, Any] = 0x6745_2301
_snake_case : List[Any] = 0xEFCD_AB89
_snake_case : Optional[Any] = 0x98BA_DCFE
_snake_case : Optional[int] = 0x1032_5476
_snake_case : Tuple = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(snake_case__ ):
_snake_case : Tuple = aa
_snake_case : str = ba
_snake_case : int = ca
_snake_case : Dict = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_snake_case : int = d ^ (b & (c ^ d))
_snake_case : Dict = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_snake_case : Tuple = c ^ (d & (b ^ c))
_snake_case : int = (5 * i + 1) % 16
elif i <= 47:
_snake_case : List[Any] = b ^ c ^ d
_snake_case : Union[str, Any] = (3 * i + 5) % 16
else:
_snake_case : Tuple = c ^ (b | not_aa(snake_case__ ))
_snake_case : Optional[int] = (7 * i) % 16
_snake_case : Dict = (f + a + added_consts[i] + block_words[g]) % 2**32
_snake_case : List[str] = d
_snake_case : List[Any] = c
_snake_case : str = b
_snake_case : List[str] = sum_aa(snake_case__ , left_rotate_aa(snake_case__ , shift_amounts[i] ) )
# Add hashed chunk to running total
_snake_case : Union[str, Any] = sum_aa(snake_case__ , snake_case__ )
_snake_case : str = sum_aa(snake_case__ , snake_case__ )
_snake_case : Any = sum_aa(snake_case__ , snake_case__ )
_snake_case : List[str] = sum_aa(snake_case__ , snake_case__ )
_snake_case : Any = reformat_hex(snake_case__ ) + reformat_hex(snake_case__ ) + reformat_hex(snake_case__ ) + reformat_hex(snake_case__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
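# Known-answer checks (standard MD5 test vectors), assuming the implementation
# above is faithful to RFC 1321:
#
#   >>> md5_me(b"").decode()
#   'd41d8cd98f00b204e9800998ecf8427e'
#   >>> md5_me(b"The quick brown fox jumps over the lazy dog").decode()
#   '9e107d9d372bb6826bd81d3542a419d6'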
| 132 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
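# Note: everything above hits the Hugging Face Hub, hence the module-level
# `pytestmark = pytest.mark.integration`. A typical invocation (test-file path
# assumed) is: python -m pytest -m integration tests/test_inspect.py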
| 227 |
ROMAN = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 227 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
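# Minimal usage sketch (downloads from the Hugging Face Hub, so it needs network access):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")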
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
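# Sanity check: four successive 90-degree rotations reproduce the input.
#
#   m = make_matrix()
#   assert rotate_90(rotate_90(rotate_90(rotate_90(m)))) == m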
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix)) | 329 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                F"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                F" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(F"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), F"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((F"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F"Copied key={from_key} to={to_key}")

    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(F"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(F"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    id2label = id2label
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )

    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 302 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
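# Quick sanity check (illustrative addition, not part of the original module):
# both implementations should agree on the 3-4-5 right triangle.
assert euclidean_distance([0, 0], [3, 4]) == 5.0
assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0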
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
benchmark()
| 114 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
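        # Note (added comment): control_guidance_start/end bound the fraction of
        # the denoising schedule during which each ControlNet is active, so the
        # four configurations above are expected to yield different images.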
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
| 368 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _UpperCAmelCase ( unittest.TestCase):
_lowerCAmelCase : Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCAmelCase : Union[str, Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs, [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs, [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs, [
{'''generated_token_ids''': ANY(lowercase_ )},
{'''generated_token_ids''': ANY(lowercase_ )},
] , )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
        self.assertEqual(
            outputs, [
[
{'''generated_token_ids''': ANY(lowercase_ )},
{'''generated_token_ids''': ANY(lowercase_ )},
],
[
{'''generated_token_ids''': ANY(lowercase_ )},
{'''generated_token_ids''': ANY(lowercase_ )},
],
] , )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs, [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs, [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}], )
        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs, [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ], )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
            self.assertEqual(
                outputs, [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ], )
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
# Hole strategy cannot work
with self.assertRaises(lowercase_ ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom", model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16}, )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out, [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out, [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out, [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
import torch
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
import torch
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_max_new_tokens_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 155 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` by rotating out the head element."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` by in-place swapping and backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output
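# Illustrative trace (added note): for nums = [1, 2, 3], permute pops 1,
# recursively permutes [2, 3] into [[3, 2], [2, 3]], appends 1 to each, and
# repeats for the other heads, yielding all 3! = 6 orderings:
# [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]].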
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
__lowerCamelCase = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 59 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=0.0, lowerCamelCase__ = None, lowerCamelCase__ = "geglu", lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = True, lowerCamelCase__ = "layer_norm", lowerCamelCase__ = False, ):
super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A : Dict = AdaLayerNorm(lowerCamelCase__, lowerCamelCase__ )
elif self.use_ada_layer_norm_zero:
A : List[str] = AdaLayerNormZero(lowerCamelCase__, lowerCamelCase__ )
else:
A : Tuple = nn.LayerNorm(lowerCamelCase__, elementwise_affine=lowerCamelCase__ )
A : Any = Attention(
query_dim=lowerCamelCase__, heads=lowerCamelCase__, dim_head=lowerCamelCase__, dropout=lowerCamelCase__, bias=lowerCamelCase__, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=lowerCamelCase__, )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A : int = (
AdaLayerNorm(lowerCamelCase__, lowerCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(lowerCamelCase__, elementwise_affine=lowerCamelCase__ )
)
A : Dict = Attention(
query_dim=lowerCamelCase__, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=lowerCamelCase__, dim_head=lowerCamelCase__, dropout=lowerCamelCase__, bias=lowerCamelCase__, upcast_attention=lowerCamelCase__, ) # is self-attn if encoder_hidden_states is none
else:
A : Dict = None
A : Dict = None
# 3. Feed-forward
A : Optional[Any] = nn.LayerNorm(lowerCamelCase__, elementwise_affine=lowerCamelCase__ )
A : int = FeedForward(lowerCamelCase__, dropout=lowerCamelCase__, activation_fn=lowerCamelCase__, final_dropout=lowerCamelCase__ )
# let chunk size default to None
A : Optional[Any] = None
A : int = 0
    def set_chunk_feed_forward(self, chunk_size, dim):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A : Optional[int] = self.norma(lowerCamelCase__, lowerCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A : Tuple = self.norma(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, hidden_dtype=hidden_states.dtype )
else:
A : Tuple = self.norma(lowerCamelCase__ )
A : Dict = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A : str = self.attna(
lowerCamelCase__, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=lowerCamelCase__, **lowerCamelCase__, )
if self.use_ada_layer_norm_zero:
A : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
A : str = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A : Optional[int] = (
self.norma(lowerCamelCase__, lowerCamelCase__ ) if self.use_ada_layer_norm else self.norma(lowerCamelCase__ )
)
A : Optional[Any] = self.attna(
lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, attention_mask=lowerCamelCase__, **lowerCamelCase__, )
A : Dict = attn_output + hidden_states
# 3. Feed-forward
A : str = self.norma(lowerCamelCase__ )
if self.use_ada_layer_norm_zero:
A : Tuple = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A : Any = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A : Optional[Any] = torch.cat(
[self.ff(lowerCamelCase__ ) for hid_slice in norm_hidden_states.chunk(lowerCamelCase__, dim=self._chunk_dim )], dim=self._chunk_dim, )
else:
A : Dict = self.ff(lowerCamelCase__ )
if self.use_ada_layer_norm_zero:
A : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output
A : Optional[int] = ff_output + hidden_states
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))
    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = "none" ):
super().__init__()
A : Optional[int] = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
A : int = approximate
    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)
    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)
    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)
    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 116 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
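    # Note (added comment): _LazyModule defers the real submodule imports until an
    # attribute is first accessed, keeping `import transformers` fast and letting
    # the optional sentencepiece backend stay absent unless MLukeTokenizer is used.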
| 361 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 35 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
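# Note (added comment): preprocess maps PIL input(s) to an (N, C, H, W) float
# tensor rescaled from [0, 255] to [-1, 1], the range the VAE encoder expects;
# tensors are passed through (or concatenated) unchanged.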
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
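# Note (added comment): for unit vectors the chord ||x - y|| and the angle theta
# between them satisfy ||x - y|| = 2 * sin(theta / 2), so the value returned is
# 2 * arcsin(||x - y|| / 2) ** 2 = theta ** 2 / 2, proportional to the squared
# great-circle distance between the normalized CLIP embeddings.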
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class snake_case ( lowercase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=UpperCamelCase , text_encoder=UpperCamelCase , clip_model=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , feature_extractor=UpperCamelCase , coca_model=UpperCamelCase , coca_tokenizer=UpperCamelCase , coca_transform=UpperCamelCase , )
lowerCamelCase_ = (
feature_extractor.size
if isinstance(feature_extractor.size , UpperCamelCase )
else feature_extractor.size["shortest_edge"]
)
lowerCamelCase_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , UpperCamelCase )
set_requires_grad(self.clip_model , UpperCamelCase )
def snake_case ( self , UpperCamelCase = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
self.enable_attention_slicing(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
set_requires_grad(self.vae , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
set_requires_grad(self.vae , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
set_requires_grad(self.unet , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
set_requires_grad(self.unet , UpperCamelCase )
    def get_timesteps(self, num_inference_steps, strength, device):
        """Clip the schedule so denoising runs only over the last `strength` fraction of steps."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
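        # Worked example (added note): with num_inference_steps=50 and strength=0.6,
        # init_timestep = 30 and t_start = 20, so denoising runs over the final 30
        # scheduler steps; a higher strength keeps more noise and a longer trajectory.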
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ):
"""simple docstring"""
if not isinstance(UpperCamelCase , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase )}''' )
lowerCamelCase_ = image.to(device=UpperCamelCase , dtype=UpperCamelCase )
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase )
]
lowerCamelCase_ = torch.cat(UpperCamelCase , dim=0 )
else:
lowerCamelCase_ = self.vae.encode(UpperCamelCase ).latent_dist.sample(UpperCamelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase_ = 0.18_215 * init_latents
lowerCamelCase_ = init_latents.repeat_interleave(UpperCamelCase , dim=0 )
lowerCamelCase_ = randn_tensor(init_latents.shape , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase )
# get latents
lowerCamelCase_ = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = init_latents
return latents
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.coca_transform(UpperCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCamelCase_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowerCamelCase_ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.feature_extractor.preprocess(UpperCamelCase )
lowerCamelCase_ = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
lowerCamelCase_ = self.clip_model.get_image_features(UpperCamelCase )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase )
lowerCamelCase_ = image_embeddings_clip.repeat_interleave(UpperCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = latents.detach().requires_grad_()
lowerCamelCase_ = self.scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# predict the noise residual
lowerCamelCase_ = self.unet(UpperCamelCase , UpperCamelCase , encoder_hidden_states=UpperCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCamelCase_ = self.scheduler.alphas_cumprod[timestep]
lowerCamelCase_ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCamelCase_ = torch.sqrt(UpperCamelCase )
lowerCamelCase_ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCamelCase ):
lowerCamelCase_ = self.scheduler.sigmas[index]
lowerCamelCase_ = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.18_215 * sample
lowerCamelCase_ = self.vae.decode(UpperCamelCase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = transforms.Resize(self.feature_extractor_size )(UpperCamelCase )
lowerCamelCase_ = self.normalize(UpperCamelCase ).to(latents.dtype )
lowerCamelCase_ = self.clip_model.get_image_features(UpperCamelCase )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase )
lowerCamelCase_ = spherical_dist_loss(UpperCamelCase , UpperCamelCase ).mean() * clip_guidance_scale
lowerCamelCase_ = -torch.autograd.grad(UpperCamelCase , UpperCamelCase )[0]
if isinstance(self.scheduler , UpperCamelCase ):
lowerCamelCase_ = latents.detach() + grads * (sigma**2)
lowerCamelCase_ = noise_pred_original
else:
lowerCamelCase_ = noise_pred_original - torch.sqrt(UpperCamelCase ) * grads
return noise_pred, latents
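    # Note (added comment): cond_fn implements CLIP guidance: the gradient of the
    # spherical distance between the CLIP embedding of the predicted clean image
    # and the target embedding is folded back into the noise prediction, steering
    # sampling toward images that match the CLIP target.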
@torch.no_grad()
def __call__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 0.6 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = 100 , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = 0.8 , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , ):
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(UpperCamelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(UpperCamelCase , torch.Generator ) and batch_size > 1:
lowerCamelCase_ = [generator] + [None] * (batch_size - 1)
lowerCamelCase_ = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
lowerCamelCase_ = [x[0] for x in coca_is_none if x[1]]
lowerCamelCase_ = ", ".join(UpperCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCamelCase ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
lowerCamelCase_ = self.get_image_description(UpperCamelCase )
if style_prompt is None:
if len(UpperCamelCase ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
lowerCamelCase_ = self.get_image_description(UpperCamelCase )
# get prompt text embeddings for content and style
lowerCamelCase_ = self.tokenizer(
UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase , return_tensors="pt" , )
lowerCamelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = self.tokenizer(
UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase , return_tensors="pt" , )
lowerCamelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = slerp(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ = text_embeddings.repeat_interleave(UpperCamelCase , dim=0 )
# set timesteps
lowerCamelCase_ = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_offset:
lowerCamelCase_ = 1
self.scheduler.set_timesteps(UpperCamelCase , **UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.get_timesteps(UpperCamelCase , UpperCamelCase , self.device )
lowerCamelCase_ = timesteps[:1].repeat(UpperCamelCase )
# Preprocess image
lowerCamelCase_ = preprocess(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = self.prepare_latents(
UpperCamelCase , UpperCamelCase , UpperCamelCase , text_embeddings.dtype , self.device , UpperCamelCase )
lowerCamelCase_ = preprocess(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = self.prepare_latents(
UpperCamelCase , UpperCamelCase , UpperCamelCase , text_embeddings.dtype , self.device , UpperCamelCase )
lowerCamelCase_ = slerp(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if clip_guidance_scale > 0:
lowerCamelCase_ = self.get_clip_image_embeddings(UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = self.get_clip_image_embeddings(UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = slerp(
UpperCamelCase , UpperCamelCase , UpperCamelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = content_text_input.input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer([""] , padding="max_length" , max_length=UpperCamelCase , return_tensors="pt" )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase_ = uncond_embeddings.repeat_interleave(UpperCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase_ = torch.randn(UpperCamelCase , generator=UpperCamelCase , device="cpu" , dtype=UpperCamelCase ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(UpperCamelCase , generator=UpperCamelCase , device=self.device , dtype=UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
# check if the scheduler accepts generator
lowerCamelCase_ = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase_ = generator
with self.progress_bar(total=UpperCamelCase ):
for i, t in enumerate(UpperCamelCase ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# predict the noise residual
lowerCamelCase_ = self.unet(UpperCamelCase , UpperCamelCase , encoder_hidden_states=UpperCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase_ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase_ ,lowerCamelCase_ = self.cond_fn(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(UpperCamelCase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCamelCase , nsfw_content_detected=UpperCamelCase )
| 55 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00_0000_0000_0001
) -> float:
    """Approximate the square root of ``a`` with Newton's method on f(x) = x^2 - a."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
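# Example: square_root_iterative(4.0) converges to ~2.0 in a handful of Newton steps,
# each computing x_new = x - (x**2 - a) / (2 * x).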
if __name__ == "__main__":
from doctest import testmod
testmod()
| 55 | 1 |
def interpolation_search(sorted_collection: list, item: int):
    """Search ``item`` in an ascending ``sorted_collection``; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    """Recursive variant of interpolation search over ``sorted_collection[left:right + 1]``."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection: list) -> bool:
    """Raise ValueError if ``collection`` is not ascending sorted, else return True."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
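# On uniformly distributed keys the interpolation probe lands near the item's expected
# position, giving ~O(log log n) comparisons on average; the worst case degrades to O(n).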
if __name__ == "__main__":
    import sys

    # set debug = 1 to use the built-in sample instead of reading from stdin
    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
    else:
        user_input = input("Enter numbers separated by comma:\n").strip()
        collection = [int(item.strip()) for item in user_input.split(",")]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 304 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
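# Sanity check: solution(15) == 26, since 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.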
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 304 | 1 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 61 |
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
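# Worked example: bin_to_octal("1111") pads to "001111", splits into ["001", "111"],
# and returns "17" (0o17 == 0b1111 == 15).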
if __name__ == "__main__":
from doctest import testmod
testmod()
| 144 | 0 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total
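# Closed form S_n = (n / 2) * (2 * a + (n - 1) * d); e.g. sum_of_series(1, 1, 10) == 55.0.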
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 68 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : str , *lowercase : int , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Optional[Any] , *lowercase : Dict , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *lowercase : Optional[int] , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : int , *lowercase : List[Any] , **lowercase : Tuple ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *lowercase : Optional[int] , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : Tuple , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Dict , *lowercase : Optional[int] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : Optional[Any] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : int , *lowercase : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : str , *lowercase : List[str] , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : Any , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Dict , *lowercase : int , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Dict , *lowercase : Tuple , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Optional[Any] , *lowercase : List[Any] , **lowercase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : Dict , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : str , *lowercase : Dict , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : int , *lowercase : int , **lowercase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : str , *lowercase : Union[str, Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Union[str, Any] , *lowercase : Tuple , **lowercase : int ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Dict , *lowercase : List[Any] , **lowercase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : str , *lowercase : Any , **lowercase : int ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Dict , *lowercase : Optional[int] , **lowercase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : str , *lowercase : Union[str, Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *lowercase : Dict , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : str , *lowercase : Optional[Any] , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Any , *lowercase : Tuple , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *lowercase : List[Any] , **lowercase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[Any] , *lowercase : Any , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : Union[str, Any] , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[Any] , *lowercase : str , **lowercase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Dict , *lowercase : int , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : int , *lowercase : int , **lowercase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[Any] , *lowercase : List[Any] , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : List[str] , *lowercase : Dict , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Dict , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : int , *lowercase : Any , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
| 34 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
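# Note: each down block returns (hidden_states, output_states); the UNet collects these
# output_states and the up blocks consume them in reverse order as res_hidden_states_tuple,
# forming the U-Net skip connections.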
| 345 | 0 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
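# The loop above applies the three Nagel-Schreckenberg rules per car:
# 1) accelerate by 1 up to max_speed, 2) brake down to the free gap ahead,
# 3) with probability `probability`, randomly slow down by 1.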
def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data.")
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple="cpu" ) -> Optional[Any]:
_a = model_dict[model_name].from_pretrained(lowercase ).to(lowercase )
_a = tokenizer_dict[model_name].from_pretrained(lowercase )
if model_name in ["facebook/bart-base"]:
_a = 0
_a = None
_a = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )
        torch.onnx.export(
            bart_script_model, (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], output_names=["output_ids"], dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            }, example_outputs=summary_ids, )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            }, )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
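# Illustrative invocation (script name and paths are assumptions, not part of this file):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base --output_file_path BART.onnx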
| 346 | 1 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
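# Note: this is the "random swap" variant. The classic Fisher-Yates shuffle instead walks
# the list once and swaps index i with a random index j drawn from the not-yet-visited range.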
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 267 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the fill count first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
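# fill_count_functions[n] counts the ways to fill a row of n cells with blocks of at least
# min_block_length cells separated by at least one empty cell (Project Euler 115).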
if __name__ == "__main__":
print(f"""{solution() = }""")
| 267 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.")
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
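# Minimal usage sketch (assumes a transformers version that ships DPT):
#   config = DPTConfig(image_size=384, patch_size=16)
#   config_dict = config.to_dict()  # serializable dict, includes "model_type": "dpt"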
| 357 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_log_warning(self):
        # Check that an error is raised when a user tries to load a tokenizer saved
        # in a format older than the expected `backend_tokenizer` format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 31 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler inspired by DPM-Solver-2 and Algorithm 2 of the Karras et al. (2022) paper."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict: bool = True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 344 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
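# Example: unique_prime_factors(100) == {2, 5}, since 100 == 2**2 * 5**2.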
@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors of ``num``."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of ``n`` consecutive integers with ``n`` distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through the unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
| 162 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 42 |
def catalan(number: int) -> int:
    """Return the ``number``-th Catalan number (1-indexed)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
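# Example: catalan(5) == 14; the sequence runs 1, 1, 2, 5, 14, 42, ...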
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 | 1 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle. Caller is responsible for opening and closing the handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
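
# Note (my own summary, not from the source): with `num_proc > 1` the batches
# are JSON-encoded in a `multiprocessing.Pool`, and `pool.imap` yields the
# encoded chunks in order, so the single writer process appends them sequentially.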
| 271 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 271 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase :Dict = logging.get_logger(__name__)
lowerCAmelCase :str = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = """sew-d"""
def __init__( self : Dict , _A : str=32 , _A : Optional[int]=768 , _A : int=12 , _A : List[Any]=12 , _A : Optional[int]=3072 , _A : Tuple=2 , _A : Optional[int]=512 , _A : str=256 , _A : int=True , _A : Optional[int]=True , _A : Optional[Any]=("p2c", "c2p") , _A : Optional[Any]="layer_norm" , _A : Tuple="gelu_python" , _A : Optional[Any]=0.1 , _A : List[str]=0.1 , _A : int=0.1 , _A : List[Any]=0.0 , _A : List[str]=0.1 , _A : Any=0.02 , _A : List[Any]=1E-7 , _A : Tuple=1E-5 , _A : Optional[Any]="group" , _A : Optional[int]="gelu" , _A : Union[str, Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _A : Tuple=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Tuple=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Optional[Any]=False , _A : Optional[Any]=128 , _A : Tuple=16 , _A : Dict=True , _A : str=0.05 , _A : int=10 , _A : Optional[Any]=2 , _A : Union[str, Any]=0.0 , _A : str=10 , _A : int=0 , _A : Any="mean" , _A : Any=False , _A : Optional[int]=False , _A : Any=256 , _A : Optional[int]=0 , _A : Any=1 , _A : Any=2 , **_A : List[Any] , ) -> List[Any]:
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
__magic_name__ : Dict = hidden_size
__magic_name__ : int = feat_extract_norm
__magic_name__ : List[str] = feat_extract_activation
__magic_name__ : Tuple = list(_A )
__magic_name__ : int = list(_A )
__magic_name__ : str = list(_A )
__magic_name__ : Union[str, Any] = conv_bias
__magic_name__ : Union[str, Any] = num_conv_pos_embeddings
__magic_name__ : Any = num_conv_pos_embedding_groups
__magic_name__ : str = len(self.conv_dim )
__magic_name__ : str = num_hidden_layers
__magic_name__ : Dict = intermediate_size
__magic_name__ : Optional[int] = squeeze_factor
__magic_name__ : Dict = max_position_embeddings
__magic_name__ : List[str] = position_buckets
__magic_name__ : Optional[int] = share_att_key
__magic_name__ : Optional[Any] = relative_attention
__magic_name__ : int = norm_rel_ebd
__magic_name__ : Dict = list(_A )
__magic_name__ : Tuple = hidden_act
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = hidden_dropout
__magic_name__ : Optional[Any] = attention_dropout
__magic_name__ : List[Any] = activation_dropout
__magic_name__ : Optional[int] = feat_proj_dropout
__magic_name__ : Dict = final_dropout
__magic_name__ : int = layer_norm_eps
__magic_name__ : Any = feature_layer_norm_eps
__magic_name__ : str = initializer_range
__magic_name__ : Optional[int] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__magic_name__ : int = apply_spec_augment
__magic_name__ : List[str] = mask_time_prob
__magic_name__ : Dict = mask_time_length
__magic_name__ : Union[str, Any] = mask_time_min_masks
__magic_name__ : int = mask_feature_prob
__magic_name__ : Tuple = mask_feature_length
__magic_name__ : Any = mask_feature_min_masks
# ctc loss
__magic_name__ : Any = ctc_loss_reduction
__magic_name__ : Union[str, Any] = ctc_zero_infinity
# sequence classification
__magic_name__ : str = use_weighted_layer_sum
__magic_name__ : Dict = classifier_proj_size
@property
def __lowerCAmelCase ( self : List[Any] ) -> str:
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 275 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
} | 275 | 1 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
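
# A minimal sketch of what the "init" pattern above does (my illustration, not
# part of the release script): with version "4.28.0", a line such as
#     __version__ = "4.27.0.dev0"
# in src/transformers/__init__.py is rewritten to
#     __version__ = "4.28.0"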
def update_version_in_examples(version: str):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version in the __init__ of the library."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
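
# Note (my own summary of the `_LazyModule` pattern, not from the source file):
# installing the lazy module in `sys.modules` defers the heavy torch/tf/flax
# imports until an attribute such as `ElectraModel` is first accessed.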
| 303 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=0.2 ,lowerCamelCase_: Tuple=0.2 ) -> str:
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : Tuple = bp_numa
UpperCAmelCase_ : List[Any] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : Optional[int] = size_pa
UpperCAmelCase_ : List[str] = rate_w
UpperCAmelCase_ : Union[str, Any] = rate_t
UpperCAmelCase_ : Optional[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Dict ) -> Optional[int]:
# save model dict with pickle
UpperCAmelCase_ : Tuple = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: Dict ,lowerCamelCase_: Any ) -> Dict:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : str = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Tuple = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : Dict = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : str = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : Any = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : int = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[str] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Dict = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : str = model_dic.get("""wkj""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""vji""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Any:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ) -> List[str]:
# convolution process
UpperCAmelCase_ : int = convs[0]
UpperCAmelCase_ : Optional[int] = convs[1]
UpperCAmelCase_ : List[str] = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : int = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Dict = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Any = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Dict = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Tuple = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
# expanding the data slice to One dimenssion
UpperCAmelCase_ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Tuple = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: List[str] ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Union[str, Any] = len(featuremaps[0] )
UpperCAmelCase_ : List[Any] = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[Any] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Optional[Any] = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> List[str]:
# expanding three dimension data to one dimension list
UpperCAmelCase_ : str = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : List[str] = np.shape(data[i] )
UpperCAmelCase_ : int = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Any ,lowerCamelCase_: Tuple ) -> Dict:
# expanding matrix to one dimension list
UpperCAmelCase_ : str = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ) -> Any:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : str = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : int = pd_pool[
i_pool
]
UpperCAmelCase_ : str = i_pool + 1
UpperCAmelCase_ : Union[str, Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any]=bool ) -> List[str]:
# model traning
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[Any] = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : Union[str, Any] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : List[Any] = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Any = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : str = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : Tuple = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : int = data_bp_input
UpperCAmelCase_ : str = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : Dict = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Dict = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[Any] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Optional[int] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[str] = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Optional[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : str = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : Dict = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : Tuple = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Tuple = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Dict = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : Optional[int] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase_ : List[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : List[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
UpperCAmelCase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : List[Any] = rp + 1
UpperCAmelCase_ : str = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: List[str] ,lowerCamelCase_: Dict ) -> List[str]:
# model predict
UpperCAmelCase_ : Optional[Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Optional[Any] = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : int = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : Tuple = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : int = data_bp_input
UpperCAmelCase_ : List[str] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Any = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: Tuple ) -> Optional[int]:
# return the data of image after convoluting process so we can check it out
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : int = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 59 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase_ = '''true'''
def lowerCamelCase_ ( _a : List[Any] , _a : List[str]=82 , _a : Tuple=16 ):
'''simple docstring'''
set_seed(42 )
UpperCAmelCase_ : int = RegressionModel()
UpperCAmelCase_ : List[Any] = deepcopy(_a )
UpperCAmelCase_ : Tuple = RegressionDataset(length=_a )
UpperCAmelCase_ : int = DataLoader(_a , batch_size=_a )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(_a , _a )
return model, ddp_model, dataloader
def lowerCamelCase_ ( _a : Accelerator , _a : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
UpperCAmelCase_ : int = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(_a : str ):
UpperCAmelCase_ : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_a , max_length=_a )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
_a , batched=_a , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_a : List[str] ):
if use_longest:
return tokenizer.pad(_a , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(_a , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(_a , shuffle=_a , collate_fn=_a , batch_size=16 )
def lowerCamelCase_ ( _a : Any , _a : int ):
'''simple docstring'''
UpperCAmelCase_ : int = Accelerator(dispatch_batches=_a , split_batches=_a )
UpperCAmelCase_ : Dict = get_dataloader(_a , not dispatch_batches )
UpperCAmelCase_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=_a )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(_a , _a )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase_ ( _a : Optional[int] , _a : Optional[Any] , _a : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = batch.values()
with torch.no_grad():
UpperCAmelCase_ : str = model(_a )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(_a )
targs.append(_a )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = torch.cat(_a ), torch.cat(_a )
return logits, targs
def lowerCamelCase_ ( _a : Accelerator , _a : str=82 , _a : str=False , _a : Dict=False , _a : Dict=16 ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_basic_setup(_a , _a , _a )
UpperCAmelCase_ , UpperCAmelCase_ : Any = generate_predictions(_a , _a , _a )
assert (
len(_a ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_a )}'''
def lowerCamelCase_ ( _a : bool = False , _a : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = evaluate.load("""glue""" , """mrpc""" )
UpperCAmelCase_ , UpperCAmelCase_ : str = get_mrpc_setup(_a , _a )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = setup["""no"""]
model.to(_a )
model.eval()
for batch in dataloader:
batch.to(_a )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**_a )
UpperCAmelCase_ : Any = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_a , references=batch["""labels"""] )
UpperCAmelCase_ : str = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : List[str] = model(**_a )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Union[str, Any] = batch["""labels"""]
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_a , references=_a )
UpperCAmelCase_ : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(_a , _a )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[int] = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(_a , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
UpperCAmelCase_ : str = Accelerator()
test_torch_metrics(_a , 512 )
accelerator.state._reset_state()
def lowerCamelCase_ ( _a : Optional[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 59 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 153 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def a__ ( _SCREAMING_SNAKE_CASE ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def a__ ( _SCREAMING_SNAKE_CASE ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _lowerCamelCase :
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
class _lowerCamelCase ( _lowercase ):
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 1
UpperCamelCase = [1, 2]
UpperCamelCase = {"a": 1, "b": 2}
UpperCamelCase = {"a": [1, 2], "b": [3, 4]}
UpperCamelCase = {"a": {"1": 1}, "b": 2}
UpperCamelCase = {"a": 1, "b": 2, "c": 3, "d": 4}
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 2
UpperCamelCase = [2, 3]
UpperCamelCase = {"a": 2, "b": 3}
UpperCamelCase = {"a": [2, 3], "b": [4, 5]}
UpperCamelCase = {"a": {"1": 2}, "b": 3}
UpperCamelCase = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
UpperCamelCase = 2
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
UpperCamelCase = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
UpperCamelCase = {"a": 2, "b": 0, "c": 2}
UpperCamelCase = {
"a": np.eye(2 ).astype(__a ),
"b": np.zeros(3 ).astype(__a ),
"c": np.ones(2 ).astype(__a ),
}
self.assertEqual(map_nested(__a , __a , map_numpy=__a ) , __a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__a , __a , map_numpy=__a , num_proc=__a ) , __a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a , num_proc=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__a ): # can't pickle a local lambda
map_nested(lambda __a : x + 1 , __a , num_proc=__a )
def snake_case_ (self ) -> Tuple:
UpperCamelCase = {"a": 1, "b": 2}
UpperCamelCase = {"a": 3, "b": 4}
UpperCamelCase = {"a": 5, "b": 6}
UpperCamelCase = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(__a , __a , __a ) ) , __a )
def snake_case_ (self ) -> Dict:
class _lowerCamelCase :
UpperCAmelCase_ = "bar"
UpperCamelCase = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(__a , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
UpperCamelCase = {F"{i}": i for i in range(_SCREAMING_SNAKE_CASE )}
UpperCamelCase = map_nested(lambda _SCREAMING_SNAKE_CASE : x + 10 , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _lowerCamelCase ( _lowercase ):
@require_tf
def snake_case_ (self ) -> str:
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase = layers.Dense(2 )
def gen_random_output():
UpperCamelCase = tf.random.uniform((1, 3) )
return model(__a ).numpy()
with temp_seed(42 , set_tensorflow=__a ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_tensorflow=__a ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def snake_case_ (self ) -> Tuple:
import torch
def gen_random_output():
UpperCamelCase = torch.nn.Linear(3 , 2 )
UpperCamelCase = torch.rand(1 , 3 )
return model(__a ).detach().numpy()
with temp_seed(42 , set_pytorch=__a ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_pytorch=__a ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def snake_case_ (self ) -> Tuple:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = NestedDataStructure(_SCREAMING_SNAKE_CASE ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = NestedDataStructure(_SCREAMING_SNAKE_CASE ).flatten()
assert output == expected_output
def a__ ( ):
"""simple docstring"""
UpperCamelCase = A(x=1 , y="foobar" )
UpperCamelCase = {"x": 1, "y": "foobar"}
assert asdict(_SCREAMING_SNAKE_CASE ) == expected_output
UpperCamelCase = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
UpperCamelCase = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(_SCREAMING_SNAKE_CASE ) == expected_output
with pytest.raises(_SCREAMING_SNAKE_CASE ):
asdict([1, A(x=10 , y="foo" )] )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return text.split()
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def a__ ( ):
"""simple docstring"""
with Pool(2 ) as pool:
UpperCamelCase = list(iflatmap_unordered(_SCREAMING_SNAKE_CASE , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(_SCREAMING_SNAKE_CASE ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCamelCase = list(iflatmap_unordered(_SCREAMING_SNAKE_CASE , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(_SCREAMING_SNAKE_CASE ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCamelCase = []
for yield_time, content in iflatmap_unordered(
_SCREAMING_SNAKE_CASE , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(_SCREAMING_SNAKE_CASE )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(_SCREAMING_SNAKE_CASE ) == 4
| 153 | 1 |
"""simple docstring"""
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) stochastic differential equation scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
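
# Rough usage sketch (my own illustration, not from the source): after
# `set_timesteps(n)`, each `step_pred(score, x, t)` call performs one
# Euler-Maruyama update of the reverse-time VP SDE, returning both the new
# noisy sample and its noise-free mean.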
| 359 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Union[str, Any] = XGLMTokenizer
__lowercase : int = XGLMTokenizerFast
__lowercase : Optional[Any] = True
__lowercase : str = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(len(lowerCAmelCase__) , 1_0_0_8)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def snake_case_ ( self):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
def snake_case_ ( self):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name)
__SCREAMING_SNAKE_CASE = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = pickle.dumps(lowerCAmelCase__)
pickle.loads(lowerCAmelCase__)
def snake_case_ ( self):
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """Hello World!"""
__SCREAMING_SNAKE_CASE = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
__SCREAMING_SNAKE_CASE = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def snake_case_ ( self):
# fmt: off
__SCREAMING_SNAKE_CASE = {
"""input_ids""": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False)
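
# Usage sketch (not part of the test file, grounded in the assertions above):
# tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# tokenizer.encode("Hello World!")  # -> [2, 31227, 4447, 35]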
| 255 | 0 |
"""Boyer-Moore string search using the bad-character heuristic."""
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # Last index of `char` in the pattern, or -1 if it does not occur.
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # Text index of the right-most mismatch for the window starting at
        # `current_pos`, or -1 if the whole window matches the pattern.
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # Searches pattern in text and returns the index positions of all matches.
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
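
# Quick sanity check (not in the original script): "AB" occurs in "ABAABA"
# at indices 0 and 3, so the demo above prints [0, 3].
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]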
| 104 |
"""Ideal gas law: PV = nRT, so P = nRT / V and V = nRT / P."""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
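
# Usage sketch (illustrative values, not from the original module): 1 mol of
# an ideal gas at 300 K in a 0.01 m^3 vessel exerts roughly 249,434 Pa.
assert round(pressure_of_gas_system(1.0, 300.0, 0.01)) == 249434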
| 321 | 0 |
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method, holding the decoded sample."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    """Convolutional VAE encoder: conv_in -> down blocks -> mid block -> conv_out."""

    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",),
                 block_out_channels=(64,), layers_per_block=2, norm_num_groups=32,
                 act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    """Convolutional VAE decoder: conv_in -> mid block -> up blocks -> conv_out."""

    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",),
                 block_out_channels=(64,), layers_per_block=2, norm_num_groups=32,
                 act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    """VQ-VAE discretization bottleneck: maps continuous encodings to the
    nearest entries of a learned codebook."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class DiagonalGaussianDistribution(object):
    """Gaussian posterior parameterized by concatenated mean and log-variance."""

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
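
# Usage sketch (illustrative shapes, not part of the original module): a VAE
# encoder emitting a (B, 2*C, H, W) tensor of concatenated means and
# log-variances feeds DiagonalGaussianDistribution directly:
# posterior = DiagonalGaussianDistribution(torch.randn(1, 8, 32, 32))
# latents = posterior.sample()  # (1, 4, 32, 32)
# kl = posterior.kl()           # (1,)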
| 134 |
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
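
# Usage sketch (standard checkpoint name; the image variable is illustrative):
# from transformers import OwlViTProcessor
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                    images=image, return_tensors="pt")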
| 134 | 1 |
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits of factorial(num)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
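
# Sanity check (not in the original): 10! = 3628800, whose digits sum to 27.
assert solution(10) == 27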
| 300 |
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` near the initial guess ``a`` via Newton-Raphson."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of log(x) - 1 = 0 (the root is e)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
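
# The update rule implemented above is the classic Newton-Raphson step:
#     x_{n+1} = x_n - f(x_n) / f'(x_n)
# e.g. x**2 - 5*x + 2 = 0 started at 0.4 converges to
# (5 - sqrt(17)) / 2 ≈ 0.438447.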
| 300 | 1 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)  # Euler path: exactly two odd-degree nodes (1 and 5)
    check_euler(g2, max_node)  # Euler cycle: all degrees even
    check_euler(g3, max_node)  # not Eulerian: four odd-degree nodes
    check_euler(g4, max_node)  # Euler cycle: triangle, all degrees even
    check_euler(g5, max_node)  # Euler cycle (degenerate: no edges)


if __name__ == "__main__":
main() | 307 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16,
            sample_size=512, sample_rate=16_000, in_channels=2, out_channels=2,
            flip_sin_to_cos=True, use_timestep_embedding=False,
            time_embedding_type="fourier", mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
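
# Usage sketch for the pipeline exercised in the slow tests below (checkpoint
# name as in those tests; runtime values are illustrative):
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios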
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 | 307 | 1 |
"""Bisection method for finding a root of f(x) = 10 - x*x on an interval."""


def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a sign change of equation() guarantees a root in [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
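
# Quick check (not in the original): f(x) = 10 - x*x changes sign on both
# [-2, 5] and [0, 6], so each call above converges to ~sqrt(10) = 3.162...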
| 1 | '''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    # Returns everything needed to perform basic training
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test on a single CPU or GPU that the `no_sync` context manager is a no-op
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on a distributed setup that the `no_sync` context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
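
# This script is meant to be launched under the accelerate multi-process test
# harness, e.g. (illustrative): accelerate launch --num_processes 2 test_sync.py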
| 1 | 1 |
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 221 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a ( self : Dict ) -> Optional[Any]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, fp16=True,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False,
            sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
def a ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , save_to_csv=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "train_time.csv" ) , env_info_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "env.csv" ) , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
benchmark.run()
        self.assertTrue(Path(os.path.join(tmp_dir , "inf_time.csv" ) ).exists() )
        self.assertTrue(Path(os.path.join(tmp_dir , "train_time.csv" ) ).exists() )
        self.assertTrue(Path(os.path.join(tmp_dir , "inf_mem.csv" ) ).exists() )
        self.assertTrue(Path(os.path.join(tmp_dir , "train_mem.csv" ) ).exists() )
        self.assertTrue(Path(os.path.join(tmp_dir , "env.csv" ) ).exists() )
def a ( self : Optional[Any] ) -> Any:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE__ : List[Any] ):
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "sequential" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "cumulative" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "current" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=True , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , "log.txt" ) , log_print=True , trace_memory_line_by_line=True , multi_process=False , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
        self.assertTrue(Path(os.path.join(tmp_dir , "log.txt" ) ).exists() )
| 221 | 1 |
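For orientation, a minimal sketch of the benchmark API the tests above exercise. The model id and sizes are illustrative, not prescribed by the tests:

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

# Measure inference time/memory for one tiny model at one shape.
args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)  # per-model timing dict, as asserted on above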
from typing import List
from .keymap import KEYMAP, get_character
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
    def decorator(func ):
        # collect the key on the decorated function so the metaclass can find it
        handle = getattr(func , """handle_key""" , [] )
        handle += [SCREAMING_SNAKE_CASE__]
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
def lowercase_multiple ( *SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]:
    def decorator(func ):
        # same idea, but several keys share one handler
        handle = getattr(func , """handle_key""" , [] )
        handle += list(SCREAMING_SNAKE_CASE__ )
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
class KeyHandler ( type ):
    '''simple docstring'''
    def __new__( cls : str , name : Optional[Any] , bases : List[Any] , attrs : Optional[Any]) -> Dict:
        """simple docstring"""
        new_cls = super().__new__(cls , name , bases , attrs)
        if not hasattr(new_cls , """key_handler"""):
            setattr(new_cls , """key_handler""" , {})
        setattr(new_cls , """handle_input""" , KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input ( cls : str) -> Any:
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register ( cls : Union[str, Any] ) -> int:
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 317 |
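A hypothetical usage sketch of the key-handler pattern above (the class and key choices are invented for illustration): decorated methods register key codes, and the metaclass gathers them into `key_handler` for dispatch.

class Menu(metaclass=KeyHandler):
    @lowercase(ord("j"))                     # pressing "j" dispatches to move_down
    def move_down(cls):
        print("down")

    @lowercase_multiple(ord("k"), ord("K"))  # several keys, one handler
    def move_up(cls):
        print("up")

# Menu.key_handler maps each registered key code to its method, and
# Menu.handle_input() reads one character and dispatches on the match.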
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs (token : Optional[int] , num_runs : Any=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = """636036"""
    url = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs (token : Optional[Any] ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["""id"""]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts (artifact_names : str , output_dir : Union[str, Any] , token : Optional[int] ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports (artifact_names : Union[str, Any] , output_dir : List[str] , token : int ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , F"{artifact_name}.zip" )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("""UTF-8""" )
    return results
| 64 | 0 |
'''simple docstring'''
from math import factorial
def SCREAMING_SNAKE_CASE__ ( __A = 100 ) -> int:
    return sum(int(x ) for x in str(factorial(__A ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 359 |
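The same computation with descriptive names; Python's arbitrary-precision integers keep the factorial exact, so the digit sum is as well.

from math import factorial

def digit_sum_of_factorial(n: int = 100) -> int:
    # Sum the decimal digits of n!.
    return sum(int(digit) for digit in str(factorial(n)))

assert digit_sum_of_factorial(10) == 27  # 10! = 3628800 -> 3+6+2+8+8+0+0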
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
pass
def hashimage ( __A ) -> str:
    m = hashlib.md5(__A.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable ( __A ) -> Dict:
    npimg = np.array(__A )
    shape = npimg.shape
    return {"hash": hashimage(__A ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
__lowercase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__lowercase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MaskGenerationPipeline(model=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
_snake_case = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=2_56 )
# Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs['masks'] ):
            new_outupt += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_outupt , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'facebook/sam-vit-huge'
_snake_case = pipeline('mask-generation' , model=lowerCAmelCase_ )
_snake_case = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs['masks'] ):
            new_outupt += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_outupt , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
] , )
| 160 | 0 |
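A standalone sketch of the fingerprinting trick those tests rely on: hashing an array's raw bytes to a short, stable id so large masks can be compared by value rather than inlined.

import hashlib
import numpy as np

def fingerprint(mask: np.ndarray) -> str:
    # Short, deterministic id for an array's contents.
    return hashlib.md5(mask.tobytes()).hexdigest()[:10]

a = np.zeros((480, 640), dtype=np.uint8)
b = np.zeros((480, 640), dtype=np.uint8)
assert fingerprint(a) == fingerprint(b)  # identical contents -> identical hash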
"""simple docstring"""
import datasets
_lowercase = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_lowercase = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_lowercase = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy ( preds : Dict , labels : List[str] ):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)
    def _SCREAMING_SNAKE_CASE ( self : Any ,predictions : Tuple ,references : Union[str, Any] ) -> Optional[Any]:
        return {"accuracy": simple_accuracy(predictions ,references )}
| 74 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__UpperCAmelCase = None
__UpperCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__UpperCAmelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : Optional[str] = None
# Automatically constructed
UpperCAmelCase__ : ClassVar[str] = "PIL.Image.Image"
UpperCAmelCase__ : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCAmelCase__ : str = field(default="Image" , init=a__ , repr=a__ )
def __call__( self ) -> Any:
return self.pa_type
    def snake_case_ ( self, value ) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.' )
        if isinstance(value, list ):
            value = np.array(value )
        if isinstance(value, str ):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value, PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path' )}
        elif value.get('bytes' ) is not None or value.get('path' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes' ), "path": value.get('path' )}
        else:
            raise ValueError(
                F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def snake_case_ ( self, value, token_per_repo_id=None ) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.' )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split('::' )[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL )['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
def snake_case_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCamelCase : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.binary() )
UpperCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase : Optional[int] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.string() )
UpperCamelCase : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
UpperCamelCase : List[str] = storage.field('bytes' )
else:
UpperCamelCase : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
UpperCamelCase : List[str] = storage.field('path' )
else:
UpperCamelCase : Optional[Any] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.string() )
UpperCamelCase : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase : Optional[Any] = pa.array(
[encode_np_array(np.array(SCREAMING_SNAKE_CASE_ ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.string() )
UpperCamelCase : int = pa.StructArray.from_arrays(
[bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_, self.pa_type )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE_ ):
with xopen(SCREAMING_SNAKE_CASE_, 'rb' ) as f:
UpperCamelCase : Optional[int] = f.read()
return bytes_
UpperCamelCase : Union[str, Any] = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase : Any = pa.array(
[os.path.basename(SCREAMING_SNAKE_CASE_ ) if path is not None else None for path in storage.field('path' ).to_pylist()], type=pa.string(), )
UpperCamelCase : int = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_, self.pa_type )
def UpperCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def UpperCamelCase ( snake_case__ : "PIL.Image.Image" ) -> bytes:
UpperCamelCase : Any = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase : Tuple = image.format
else:
UpperCamelCase : List[str] = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(snake_case__ , format=snake_case__ )
return buffer.getvalue()
def UpperCamelCase ( snake_case__ : "PIL.Image.Image" ) -> dict:
if hasattr(snake_case__ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(snake_case__ )}
def UpperCamelCase ( snake_case__ : np.ndarray ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
UpperCamelCase : Union[str, Any] = array.dtype
UpperCamelCase : List[Any] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
UpperCamelCase : Optional[Any] = dtype.kind
UpperCamelCase : Any = dtype.itemsize
UpperCamelCase : int = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase : Optional[Any] = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase : List[Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase : Dict = dtype_byteorder + dtype_kind + str(snake_case__ )
UpperCamelCase : str = np.dtype(snake_case__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
UpperCamelCase : Union[str, Any] = PIL.Image.fromarray(array.astype(snake_case__ ) )
return {"path": None, "bytes": image_to_bytes(snake_case__ )}
def UpperCamelCase ( snake_case__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
UpperCamelCase , UpperCamelCase : Union[str, Any] = first_non_null_value(snake_case__ )
if isinstance(snake_case__ , snake_case__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(snake_case__ , np.ndarray ):
UpperCamelCase : List[Any] = no_op_if_value_is_null(snake_case__ )
return [obj_to_image_dict_func(snake_case__ ) for obj in objs]
elif isinstance(snake_case__ , PIL.Image.Image ):
UpperCamelCase : Optional[int] = no_op_if_value_is_null(snake_case__ )
return [obj_to_image_dict_func(snake_case__ ) for obj in objs]
else:
return objs
else:
return objs
| 119 | 0 |
def UpperCamelCase ( _A ):
"""simple docstring"""
return "".join([hex(_A )[2:].zfill(2 ).upper() for byte in list(_A )] )
def UpperCamelCase ( _A ):
"""simple docstring"""
if (len(_A ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(_A ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
    return bytes(int(_A[i] + _A[i + 1], 16 ) for i in range(0, len(_A ), 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 |
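A round-trip check of the logic above against the standard library's Base16 codec, so the custom encode and decode can be verified independently:

import base64

data = b"Hello"
encoded = "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
assert encoded == base64.b16encode(data).decode("ascii")  # "48656C6C6F"
decoded = bytes(int(encoded[i : i + 2], 16) for i in range(0, len(encoded), 2))
assert decoded == data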
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__magic_name__: str = logging.getLogger(__name__)
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : int = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""", type=_A, default="""data/dump.txt""", help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""", type=_A, default="""bert""", choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""", type=_A, default="""bert-base-uncased""", help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""", type=_A, default="""data/dump""", help="""The dump file prefix.""" )
__magic_name__ : Dict = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
__magic_name__ : Tuple = BertTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
__magic_name__ : Optional[int] = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
__magic_name__ : Optional[int] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
__magic_name__ : Any = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
__magic_name__ : Any = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : int = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
__magic_name__ : Optional[int] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path, """r""", encoding="""utf8""" ) as fp:
__magic_name__ : Tuple = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f'{len(_A )} examples to process.' )
__magic_name__ : List[Any] = []
__magic_name__ : str = 0
__magic_name__ : str = 10000
__magic_name__ : Dict = time.time()
for text in data:
__magic_name__ : Tuple = f'{bos} {text.strip()} {sep}'
__magic_name__ : Optional[int] = tokenizer.encode(_A, add_special_tokens=_A )
rslt.append(_A )
iter += 1
if iter % interval == 0:
__magic_name__ : Union[str, Any] = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
__magic_name__ : Any = time.time()
logger.info("""Finished binarization""" )
logger.info(f'{len(_A )} examples processed.' )
__magic_name__ : Tuple = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(_A, """wb""" ) as handle:
pickle.dump(rslt_, _A, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 138 | 1 |
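The dtype choice near the end of that script halves memory whenever every token id fits in 16 bits; a small sketch of the same idea:

import numpy as np

def pack_ids(token_ids, vocab_size):
    # uint16 suffices while every id is < 65536; fall back to int32 otherwise.
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return np.asarray(token_ids, dtype=dtype)

ids = pack_ids([101, 2023, 102], vocab_size=30522)
assert ids.dtype == np.uint16  # a BERT-sized vocab fits in two bytes per id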
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders ( accelerator ,batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" ,"""mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=True ,max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function ,batched=True ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" ,"""labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples ,padding="""longest""" ,max_length=max_length ,pad_to_multiple_of=pad_to_multiple_of ,return_tensors="""pt""" ,)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] ,shuffle=False ,collate_fn=collate_fn ,batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function ( config ,args ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,_snake_case ) == "1":
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : int = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : List[str] = config["""lr"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ : List[str] = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = evaluate.load("""glue""" ,"""mrpc""" )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__ : Dict = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__ : Tuple = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : Any = MAX_GPU_BATCH_SIZE
set_seed(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_dataloaders(_snake_case ,_snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    SCREAMING_SNAKE_CASE__ : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : Dict = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Dict = AdamW(params=model.parameters() ,lr=_snake_case )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Dict = get_linear_schedule_with_warmup(
optimizer=_snake_case ,num_warmup_steps=100 ,num_training_steps=(len(_snake_case ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = accelerator.prepare(
_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
# Now we train the model
for epoch in range(_snake_case ):
model.train()
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ : List[str] = model(**_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.loss
SCREAMING_SNAKE_CASE__ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(_snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(_snake_case ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
SCREAMING_SNAKE_CASE__ : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE__ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_snake_case ,references=_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,_snake_case )
def main ( ):
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" ,type=str ,default=None ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" ,)
    parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config ,args )
if __name__ == "__main__":
main()
| 25 |
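The last-batch truncation in the eval loop above can be isolated into a small, testable function; a hypothetical standalone version of the same bookkeeping (names invented):

import numpy as np

def truncate_gathered(preds, refs, samples_seen, dataset_len, is_last_batch):
    # Distributed gathers pad the final batch with duplicated samples;
    # keep only enough rows to reach the true dataset length.
    if is_last_batch:
        keep = dataset_len - samples_seen
        return preds[:keep], refs[:keep], dataset_len
    return preds, refs, samples_seen + refs.shape[0]

p, r, seen = truncate_gathered(
    np.arange(8), np.arange(8), samples_seen=4, dataset_len=10, is_last_batch=True
)
assert len(p) == 6  # only 6 real samples remained in the padded final batch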
"""simple docstring"""
def lowercase_ ( _snake_case ):
    if _snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif not isinstance(_snake_case ,int ):
        raise TypeError("""Input value must be a 'int' type""" )
return bin(_snake_case ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
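Two equivalent ways to count set bits, for comparison; Kernighan's loop clears the lowest set bit on each iteration, so it runs in time proportional to the popcount rather than the bit width.

def popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count

assert popcount_kernighan(25) == bin(25).count("1") == 3  # 25 = 0b11001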
def _lowerCAmelCase ( __lowerCAmelCase ) -> int:
    """simple docstring"""
    if not __lowerCAmelCase:
        return 0
    if not isinstance(__lowerCAmelCase , (list, tuple) ) or not all(
        isinstance(number , int ) for number in __lowerCAmelCase ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = __lowerCAmelCase[0]
    for i in range(1 , len(__lowerCAmelCase ) ):
        # update the maximum and minimum subarray products
        number = __lowerCAmelCase[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            min_till_now , max_till_now = max_till_now , min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
| 351 |
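A worked example for the function above: the negative value swaps the running max and min so a later negative factor can recover a large product, and a zero resets the run.

# [2, 3, -2, 4]: best subarray is [2, 3] with product 6.
assert _lowerCAmelCase([2, 3, -2, 4]) == 6
# [-2, 0, -1]: the zero resets the run, so the best product is 0.
assert _lowerCAmelCase([-2, 0, -1]) == 0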
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 44 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""PoolFormerFeatureExtractor"""]
a_ = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 179 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = jnp.floataa
_lowerCamelCase = True
def UpperCamelCase__( self ):
'''simple docstring'''
super().setup()
__A : List[Any] = nn.Dense(5 , dtype=self.dtype )
def __call__( self , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
__A : Tuple = super().__call__(*__lowerCamelCase , **__lowerCamelCase )
__A : Optional[int] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = FlaxBigBirdForNaturalQuestionsModule
def __lowercase ( snake_case_ : Tuple ,snake_case_ : Optional[int] ,snake_case_ : int ,snake_case_ : Tuple ,snake_case_ : Any ,snake_case_ : Any ) ->List[Any]:
'''simple docstring'''
def cross_entropy(snake_case_ : str ,snake_case_ : Optional[Any] ,snake_case_ : Tuple=None ):
__A : Dict = logits.shape[-1]
__A : Dict = (labels[..., None] == jnp.arange(snake_case_ )[None]).astype('''f4''' )
__A : int = jax.nn.log_softmax(snake_case_ ,axis=-1 )
__A : Optional[int] = -jnp.sum(labels * logits ,axis=-1 )
if reduction is not None:
__A : Optional[int] = reduction(snake_case_ )
return loss
__A : str = partial(snake_case_ ,reduction=jnp.mean )
__A : Dict = cross_entropy(snake_case_ ,snake_case_ )
__A : List[str] = cross_entropy(snake_case_ ,snake_case_ )
__A : str = cross_entropy(snake_case_ ,snake_case_ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = "google/bigbird-roberta-base"
_lowerCamelCase = 30_00
_lowerCamelCase = 1_05_00
_lowerCamelCase = 1_28
_lowerCamelCase = 3
_lowerCamelCase = 1
_lowerCamelCase = 5
# tx_args
_lowerCamelCase = 3e-5
_lowerCamelCase = 0.0
_lowerCamelCase = 2_00_00
_lowerCamelCase = 0.0_0_9_5
_lowerCamelCase = "bigbird-roberta-natural-questions"
_lowerCamelCase = "training-expt"
_lowerCamelCase = "data/nq-training.jsonl"
_lowerCamelCase = "data/nq-validation.jsonl"
def UpperCamelCase__( self ):
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=__lowerCamelCase )
__A : Dict = os.path.join(self.base_dir , self.save_dir )
__A : Dict = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 40_96 # no dynamic padding on TPUs
def __call__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = self.collate_fn(__lowerCamelCase )
__A : Tuple = jax.tree_util.tree_map(__lowerCamelCase , __lowerCamelCase )
return batch
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A , __A : List[Any] = self.fetch_inputs(features['''input_ids'''] )
__A : Union[str, Any] = {
'''input_ids''': jnp.array(__lowerCamelCase , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(__lowerCamelCase , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Any = [self._fetch_inputs(__lowerCamelCase ) for ids in input_ids]
return zip(*__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Any = [1 for _ in range(len(__lowerCamelCase ) )]
while len(__lowerCamelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def get_batched_dataset ( dataset : List[Any] ,batch_size : Optional[Any] ,seed : List[str]=None ) ->Optional[int]:
    '''simple docstring'''
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap ,axis_name='''batch''' )
def __lowercase ( snake_case_ : str ,snake_case_ : Union[str, Any] ,**snake_case_ : List[str] ) ->Tuple:
'''simple docstring'''
def loss_fn(snake_case_ : List[str] ):
__A : str = model_inputs.pop('''start_labels''' )
__A : str = model_inputs.pop('''end_labels''' )
__A : int = model_inputs.pop('''pooled_labels''' )
__A : Dict = state.apply_fn(**snake_case_ ,params=snake_case_ ,dropout_rng=snake_case_ ,train=snake_case_ )
__A , __A , __A : Union[str, Any] = outputs
return state.loss_fn(
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,)
__A , __A : int = jax.random.split(snake_case_ )
__A : str = jax.value_and_grad(snake_case_ )
__A , __A : Optional[int] = grad_fn(state.params )
__A : List[str] = jax.lax.pmean({'''loss''': loss} ,axis_name='''batch''' )
__A : List[str] = jax.lax.pmean(snake_case_ ,'''batch''' )
__A : str = state.apply_gradients(grads=snake_case_ )
return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name='''batch''' )
def __lowercase ( snake_case_ : int ,**snake_case_ : Union[str, Any] ) ->List[str]:
'''simple docstring'''
__A : Tuple = model_inputs.pop('''start_labels''' )
__A : Dict = model_inputs.pop('''end_labels''' )
__A : int = model_inputs.pop('''pooled_labels''' )
__A : List[str] = state.apply_fn(**snake_case_ ,params=state.params ,train=snake_case_ )
__A , __A , __A : Dict = outputs
__A : Optional[int] = state.loss_fn(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
__A : List[str] = jax.lax.pmean({'''loss''': loss} ,axis_name='''batch''' )
return metrics
class __snake_case ( train_state.TrainState ):
"""simple docstring"""
_lowerCamelCase = struct.field(pytree_node=SCREAMING_SNAKE_CASE__ )
@dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = None
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ):
'''simple docstring'''
__A : Tuple = model.params
__A : Union[str, Any] = TrainState.create(
apply_fn=model.__call__ , params=__lowerCamelCase , tx=__lowerCamelCase , loss_fn=__lowerCamelCase , )
if ckpt_dir is not None:
__A , __A , __A , __A , __A : Optional[Any] = restore_checkpoint(__lowerCamelCase , __lowerCamelCase )
__A : List[Any] = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
__A , __A : List[str] = build_tx(**__lowerCamelCase )
__A : int = train_state.TrainState(
step=__lowerCamelCase , apply_fn=model.__call__ , params=__lowerCamelCase , tx=__lowerCamelCase , opt_state=__lowerCamelCase , )
__A : int = args
__A : Optional[Any] = data_collator
__A : Tuple = lr
__A : List[Any] = params
__A : Dict = jax_utils.replicate(__lowerCamelCase )
return state
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : List[Any] = self.args
__A : Dict = len(__lowerCamelCase ) // args.batch_size
__A : List[Any] = jax.random.PRNGKey(0 )
__A : Optional[Any] = jax.random.split(__lowerCamelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
__A : Tuple = jnp.array(0 , dtype=jnp.floataa )
__A : Optional[Any] = get_batched_dataset(__lowerCamelCase , args.batch_size , seed=__lowerCamelCase )
__A : Union[str, Any] = 0
for batch in tqdm(__lowerCamelCase , total=__lowerCamelCase , desc=F"""Running EPOCH-{epoch}""" ):
__A : Optional[Any] = self.data_collator(__lowerCamelCase )
__A , __A , __A : Union[str, Any] = self.train_step_fn(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
__A : Union[str, Any] = jax_utils.unreplicate(state.step )
__A : Optional[int] = running_loss.item() / i
__A : List[Any] = self.scheduler_fn(state_step - 1 )
__A : Union[str, Any] = self.evaluate(__lowerCamelCase , __lowerCamelCase )
__A : Optional[Any] = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(__lowerCamelCase ) )
self.logger.log(__lowerCamelCase , commit=__lowerCamelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = get_batched_dataset(__lowerCamelCase , self.args.batch_size )
__A : int = len(__lowerCamelCase ) // self.args.batch_size
__A : Optional[Any] = jnp.array(0 , dtype=jnp.floataa )
__A : Dict = 0
for batch in tqdm(__lowerCamelCase , total=__lowerCamelCase , desc='''Evaluating ... ''' ):
__A : List[str] = self.data_collator(__lowerCamelCase )
__A : Union[str, Any] = self.val_step_fn(__lowerCamelCase , **__lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Dict = jax_utils.unreplicate(__lowerCamelCase )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' )
self.model_save_fn(__lowerCamelCase , params=state.params )
with open(os.path.join(__lowerCamelCase , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__lowerCamelCase , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(__lowerCamelCase , '''data_collator.joblib''' ) )
with open(os.path.join(__lowerCamelCase , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , __lowerCamelCase )
print('''DONE''' )
def restore_checkpoint ( save_dir : int ,state : Dict ) ->Optional[int]:
    '''simple docstring'''
    print(F"""RESTORING CHECKPOINT FROM {save_dir}""" ,end=''' ... ''' )
    with open(os.path.join(save_dir ,'''flax_model.msgpack''' ) ,'''rb''' ) as f:
        params = from_bytes(state.params ,f.read() )
    with open(os.path.join(save_dir ,'''opt_state.msgpack''' ) ,'''rb''' ) as f:
        opt_state = from_bytes(state.opt_state ,f.read() )
    args = joblib.load(os.path.join(save_dir ,'''args.joblib''' ) )
    data_collator = joblib.load(os.path.join(save_dir ,'''data_collator.joblib''' ) )
    with open(os.path.join(save_dir ,'''training_state.json''' ) ,'''r''' ) as f:
        training_state = json.load(f )
    step = training_state['''step''']
    print('''DONE''' )
    return params, opt_state, step, args, data_collator
def scheduler_fn ( lr : List[str] ,init_lr : Any ,warmup_steps : Dict ,num_train_steps : str ) ->List[str]:
    '''simple docstring'''
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr ,end_value=lr ,transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr ,end_value=1e-7 ,transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] ,boundaries=[warmup_steps] )
    return lr
def build_tx ( lr : List[str] ,init_lr : List[Any] ,warmup_steps : Union[str, Any] ,num_train_steps : List[Any] ,weight_decay : str ) ->List[str]:
    '''simple docstring'''
    def weight_decay_mask(params : List[Any] ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr ,init_lr ,warmup_steps ,num_train_steps )
    tx = optax.adamw(learning_rate=lr ,weight_decay=weight_decay ,mask=weight_decay_mask )
    return tx, lr
| 179 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["ViTFeatureExtractor"]
_SCREAMING_SNAKE_CASE = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 217 | 0 |
"""simple docstring"""
import string
def decrypt ( lowerCAmelCase__ : str ) -> None:
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in lowerCAmelCase__:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''' )
def main ( ) -> None:
    message = input('''Encrypted message: ''' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 45 |
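The brute force above prints every candidate shift; a hypothetical single-key decrypt makes the core modular shift explicit and is easy to test.

import string

def decrypt_with_key(message: str, key: int) -> str:
    out = []
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) - key) % 26
            out.append(string.ascii_uppercase[num])
        else:
            out.append(symbol)
    return "".join(out)

assert decrypt_with_key("KHOOR", 3) == "HELLO"  # Caesar shift of 3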
'''Check whether a number is a power of two using the `n & (n - 1)` bit trick.'''
def is_power_of_two( number ):
    '''
    >>> is_power_of_two(4)
    True
    >>> is_power_of_two(6)
    False
    '''
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
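# Why the bit trick works (illustrative note): a positive power of two has exactly
# one set bit, so subtracting 1 clears that bit and sets every lower bit:
#   8     -> 0b1000
#   8 - 1 -> 0b0111
#   8 & 7 -> 0b0000, so is_power_of_two(8) is True
# Edge case: 0 & -1 == 0, so this predicate also returns True for 0.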
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 | 0 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5_500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1_000}
@property
    def metric_definitions( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self ):
return F'''{self.framework}-transfromers-test'''
@property
    def test_path( self ):
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def sm_env( request ):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
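# Usage sketch (assumed, mirroring how such class-scoped fixtures are consumed):
# a test class declares its framework and opts into the fixture, which attaches
# the environment object as `self.env`:
#
#   @pytest.mark.usefixtures("sm_env")
#   class TestSingleNodeTraining(unittest.TestCase):
#       framework = "pytorch"
#
#       def test_hyperparameters(self):
#           assert self.env.hyperparameters["task_name"] == "mnli"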
| 86 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
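    # Worked example (illustrative): with the tester defaults above (image_size=32,
    # patch_size=2, depths=[1, 2, 1]) the expected sequence length is
    # (32 // 2) ** 2 // 4 ** 2 = 256 // 16 = 16 and the final hidden size is
    # 16 * 2 ** 2 = 64.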
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ['''stem''']
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip('''Swin does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass
@unittest.skip('''Swin does not support feedforward chunking''' )
    def test_feed_forward_chunking( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
    def test_attention_outputs( self ):
        pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
    def test_save_load_fast_init_to_base( self ):
        pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
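        # Worked example (illustrative): with the tester default image_size=32 and the
        # patch_size of 3 set above, padded_height = 32 + 3 - (32 % 3) = 35 - 2 = 33,
        # which is a multiple of 3.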
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
    def test_model_from_pretrained( self ):
        pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def test_initialization( self ):
        pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def test_gradient_checkpointing_backward_compatibility( self ):
        pass
    def test_model_outputs_equivalence( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"
                                f" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."
                            ) , )
                recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'''output_hidden_states''': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'''output_hidden_states''': True} )
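    # Note (illustrative): `t[t != t] = 0` zeroes exactly the NaN entries, because
    # NaN is the only value for which `x != x` holds; this keeps NaNs from poisoning
    # the `torch.allclose` comparison above. For example:
    #   t = torch.tensor([1.0, float("nan")]); t[t != t] = 0  # -> tensor([1., 0.])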
@require_torch
class MaskFormerSwinBackboneTest( unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
    def test_backbone_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['''pixel_values'''].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 86 | 1 |