"""Skip list: a probabilistic data structure that keeps key-value pairs sorted
and supports O(log n) expected search, insert, and delete."""
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # Forward references to the next node on each level this node spans.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references, i.e. how many levels the node spans."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        """Visual representation of the skip list."""
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Random level from the [1, self.max_level] interval.
        Higher values are geometrically less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return the node holding `key` (or None if absent) plus the list of
        nodes whose forward references may need updating on insert/delete."""
        # Nodes that refer, or should refer, to the output node.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None


def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests() -> None:
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main() -> None:
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
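
# Illustrative usage sketch (not part of the original test suite): a SkipList
# behaves like a sorted key-value map, so iteration yields keys in order.
#
#   sl: SkipList[int, str] = SkipList()
#   for key, value in [(3, "three"), (1, "one"), (2, "two")]:
#       sl.insert(key, value)
#   assert list(sl) == [1, 2, 3]  # keys come back sorted
#   assert sl.find(2) == "two"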
"""Deterministic Miller-Rabin primality test, valid for n < 3.32e24."""


def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin: a fixed witness set is known to be sufficient
    below 3_317_044_064_679_887_385_961_981; above that bound the test is only
    probabilistic and must be opted into with `allow_probable`."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False

    return True


def test_miller_rabin() -> None:
    """Check a composite and a prime in each deterministic range."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
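
# Worked example (illustrative): for n = 561, n - 1 = 560 = 35 * 2**4, so the
# decomposition loop above yields d = 35 and s = 4; the witnesses 2, 3, 5, 7,
# 11 then expose 561 (a Carmichael number) as composite, while 563 passes:
#
#   assert not miller_rabin(561)
#   assert miller_rabin(563)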
"""Convert original PoolFormer checkpoints to the HuggingFace Transformers format."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename `original_name` to `new_name` in `key`, shifting the block number
    down by `offset` (the number of patch-embedding layers seen so far)."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Load the COCO image on which the converted model's logits are verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
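
# Example invocation (illustrative; the script file name and checkpoint path
# are placeholders, not taken from the script itself):
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_converted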
"""Checkpointing test for Accelerate (optionally with DeepSpeed): fine-tunes
BERT on GLUE/MRPC, saves a state file per epoch, and verifies that metrics
match after resuming from a checkpoint."""
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
            json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
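
# Example launches (illustrative; the script file name and directories are
# placeholders, while `accelerate launch` is the standard entry point):
#
#   accelerate launch test_checkpointing.py --num_epochs 2 --output_dir ./ckpts
#   accelerate launch test_checkpointing.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0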
"""Validate a Spanish national ID (DNI): 8 digits plus a mod-23 check letter."""
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """The trailing letter is not part of the ID proper; it is a validator,
    computed as a mod-23 checksum over the 8-digit number."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
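
# Worked checksum example (illustrative): for "12345678Z", number = 12345678
# and 12345678 % 23 == 14; LOOKUP_LETTERS[14] == "Z", so the ID validates:
#
#   assert is_spain_national_id("12345678Z")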
"""Gabor filter: build an oriented Gabor kernel and apply it for edge detection."""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a ksize x ksize Gabor kernel:
    g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi),
    where (x', y') are the pixel offsets rotated by `theta` degrees."""
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
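
# Self-contained sketch (illustrative; avoids the hard-coded lena.jpg path):
# the same pipeline works on any grayscale uint8 array, e.g. a synthetic ramp.
#
#   synthetic = np.tile(np.arange(64, dtype=np.uint8), (64, 1))
#   kernel = gabor_filter_kernel(10, 8, 45, 10, 0, 0)
#   response = filter2D(synthetic, CV_8UC3, kernel)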
"""`transformers-cli add-new-model` (deprecated): scaffold a new model from the
cookiecutter template and move the generated files into the repository."""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
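
# Example usage of the (deprecated) command (illustrative; the testing-file
# path is a placeholder):
#
#   transformers-cli add-new-model
#   transformers-cli add-new-model --testing --testing_file ./add_new_model_config.json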
"""Dummy placeholder used when `transformers`, `torch`, or `note_seq` is missing."""
from ..utils import DummyObject, requires_backends


class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
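
# Behavior sketch (illustrative): because of the DummyObject metaclass, using
# this placeholder without `transformers`, `torch`, and `note_seq` installed
# raises an ImportError from `requires_backends`:
#
#   SpectrogramDiffusionPipeline()  # -> ImportError naming the missing backends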
"""Project Euler 97: last digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""


def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
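
# Why the modulus works (illustrative note): only the last n digits are needed,
# so pow(2, 7_830_457, 10**n) performs modular exponentiation instead of
# building the full ~2.36-million-digit number:
#
#   assert len(solution(10)) == 10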
"""Tests for the AudioDiffusionPipeline (fast dummy-model tests and a slow GPU test)."""
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
"""Lazy import structure for the ResNet model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
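
# Usage note (illustrative): with the _LazyModule pattern, the heavy modeling
# submodules are only imported on first attribute access, keeping the initial
# `import transformers` cheap:
#
#   from transformers.models.resnet import ResNetModel  # triggers the lazy import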
"""Password generator and a simple password-strength checker."""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using `secrets`."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


# ALTERNATIVE METHODS
# chars_incl = characters that must be in the password
# i = how many letters or characters the password will be long
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you'd better save it.]")


if __name__ == "__main__":
    main()
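
# Minimal checks (illustrative; secrets-based output is random, so only
# structural properties are testable):
#
#   assert len(password_generator(16)) == 16
#   assert is_strong_password("Aa1!aaaa") is True
#   assert is_strong_password("weakpass") is False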
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = nn.Linear(3 , 4 )
lowerCamelCase_ = nn.BatchNormad(4 )
lowerCamelCase_ = nn.Linear(4 , 5 )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE_ ) ) )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
return output + 1
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
lowerCamelCase_ = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(test_model._hf_hook , SCREAMING_SNAKE_CASE_ )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(SCREAMING_SNAKE_CASE_ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE_ , '_hf_hook' ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE_ , '_old_forward' ) )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
lowerCamelCase_ = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , append=SCREAMING_SNAKE_CASE_ )
self.assertEqual(isinstance(test_model._hf_hook , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(SCREAMING_SNAKE_CASE_ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE_ , '_hf_hook' ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE_ , '_old_forward' ) )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = test_model(x + 1 )
lowerCamelCase_ = test_model(x + 2 )
lowerCamelCase_ = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase_ = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase_ = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase_ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase_ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , output + 2 , atol=1E-5 )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase_ = True
lowerCamelCase_ = test_model(SCREAMING_SNAKE_CASE_ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(SCREAMING_SNAKE_CASE_ , AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.randn(2 , 3 ).to(0 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , torch.device(0 ) )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
        hook_kwargs = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase_ = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
        hook_kwargs = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE_ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
lowerCamelCase_ = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(SCREAMING_SNAKE_CASE_ , execution_device=SCREAMING_SNAKE_CASE_ , offload=SCREAMING_SNAKE_CASE_ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase_ = torch.device(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(SCREAMING_SNAKE_CASE_ , execution_device=SCREAMING_SNAKE_CASE_ , offload=SCREAMING_SNAKE_CASE_ , offload_buffers=SCREAMING_SNAKE_CASE_ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
lowerCamelCase_ = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
SCREAMING_SNAKE_CASE_ , execution_device=SCREAMING_SNAKE_CASE_ , offload=SCREAMING_SNAKE_CASE_ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase_ = torch.device(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
SCREAMING_SNAKE_CASE_ , execution_device=SCREAMING_SNAKE_CASE_ , offload=SCREAMING_SNAKE_CASE_ , weights_map=model.state_dict() , offload_buffers=SCREAMING_SNAKE_CASE_ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCamelCase_ = torch.randn(2 , 3 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
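# --- Illustration (not part of the test class above) --------------------------
# The tests rely on add_hook_to_module swapping `module.forward` for a wrapper
# that calls the hook around the saved `_old_forward`. A minimal sketch of that
# mechanic; `SimpleHook` and `attach` are hypothetical stand-ins, not the
# accelerate API itself.
import torch

class SimpleHook:
    def pre_forward(self, module, *args):
        return tuple(a + 1 for a in args)   # e.g. shift every input by 1
    def post_forward(self, module, output):
        return output

def attach(module, hook):
    module._old_forward = module.forward    # keep the original callable around
    def new_forward(*args):
        args = hook.pre_forward(module, *args)
        return hook.post_forward(module, module._old_forward(*args))
    module.forward = new_forward            # same name and signature for the caller

lin = torch.nn.Linear(3, 3)
attach(lin, SimpleHook())
_ = lin(torch.randn(2, 3))                  # the hook shifts the input first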
| 42 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
        from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
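# --- Illustration (a sketch of the launch pattern above, standalone) ----------
# The CPU paths use debug_launcher to run the test in-process, while the
# multi-GPU path shells out to `torchrun`. A minimal equivalent of that
# subprocess call, assuming some script `my_test.py` exists on disk:
import os
import subprocess
import torch

def launch_distributed(script_path: str) -> None:
    nproc = max(torch.cuda.device_count(), 1)
    cmd = ['torchrun', f'--nproc_per_node={nproc}', script_path]
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = '1'   # the effect of patch_environment(omp_num_threads=1)
    subprocess.run(cmd, env=env, check=True)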
| 42 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize ,sigma ,theta ,lambd ,gamma ,psi ) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) ,dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 1_80 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
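# --- Illustration (a quick numeric check of the Gabor formula above) ----------
# At the kernel centre px = py = 0, so the exponential term is exp(0) = 1 and
# the value reduces to cos(psi). A standalone check using only numpy:
import numpy as np

ksize, sigma, theta, lambd, gamma, psi = 11, 8.0, 30.0, 10.0, 0.5, 0.0
x = y = ksize // 2                      # centre pixel
px, py = x - ksize // 2, y - ksize // 2
_theta = theta / 180 * np.pi
_x = np.cos(_theta) * px + np.sin(_theta) * py
_y = -np.sin(_theta) * px + np.cos(_theta) * py
value = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
assert abs(value - np.cos(psi)) < 1e-12   # centre value is cos(0.0) == 1.0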
| 42 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet( hor ) -> None:
    if hor == 1_28:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 1_28, 2_56)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 1_28, 2_56)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 6_55_36,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNet1DModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
        json.dump(config ,f )
def value_function( ) -> None:
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 1_28, 2_56),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 6_55_36,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    state_dict = model
    hf_value_function = UNet1DModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
    with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
        json.dump(config ,f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
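# --- Illustration (the zip-based key remapping used by both converters) -------
# Renaming checkpoint keys with dict(zip(src.keys(), dst.keys())) only works
# because both state dicts iterate in the same insertion order. A standalone
# demonstration with two toy modules of identical structure:
import torch

src = torch.nn.Linear(2, 2)
dst = torch.nn.Linear(2, 2)
mapping = dict(zip(src.state_dict().keys(), dst.state_dict().keys()))
renamed = {mapping[k]: v for k, v in src.state_dict().items()}
dst.load_state_dict(renamed)
assert torch.equal(src.weight, dst.weight)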
| 42 | 1 |
'''simple docstring'''
import argparse
import datetime
def zeller( date_input ) -> str:
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12' )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 85_00:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) ,int(m ) ,int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response = f'''Your date {date_input}, is a {days[str(f )]}!'''
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
A_ = parser.parse_args()
zeller(args.date_input)
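# --- Illustration (cross-checking the congruence above, standalone) -----------
# The same arithmetic as zeller(), condensed, validated against the standard
# library exactly as the `dt_ck.weekday()` assertion in the function does:
import datetime

def weekday_via_zeller(m: int, d: int, y: int) -> int:
    if m <= 2:
        y, m = y - 1, m + 12
    c, k = divmod(y, 100)
    t = int(2.6 * m - 5.39)
    return (t + c // 4 + k // 4 + d + k - 2 * c) % 7   # 0 = Sunday ... 6 = Saturday

assert weekday_via_zeller(1, 1, 2000) == 6             # 01-01-2000 was a Saturday
assert datetime.date(2000, 1, 1).weekday() == 5        # 5 = Saturday, Monday-based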
| 42 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ = 250_004
A_ = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
                # Checks it saves with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE_ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
        cls.pad_token_id = 1
return cls
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
lowerCamelCase_ = targets['input_ids']
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
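# --- Illustration (the MBart sequence layout the assertions above check) ------
# MBart appends the source language code *after* EOS (suffix_tokens is
# [eos, en_XX] and prefix_tokens is empty), which is why the tests expect
# input_ids to end with [2, EN_CODE]. A standalone mock of that layout:
EOS, EN_XX = 2, 250_004

def build_source(ids: list) -> list:
    return ids + [EOS, EN_XX]   # no prefix tokens on the source side

assert build_source([8_274, 53])[-2:] == [EOS, EN_XX]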
| 42 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ = logging.get_logger(__name__)
A_ = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'nat'
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=[3, 4, 6, 5] , SCREAMING_SNAKE_CASE_=[2, 4, 8, 16] , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = depths
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = num_heads
lowerCamelCase_ = kernel_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase_ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) )
lowerCamelCase_ = layer_scale_init_value
lowerCamelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 )]
lowerCamelCase_ ,lowerCamelCase_ = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
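# --- Illustration (the hidden-size arithmetic in the config above) ------------
# Each Nat stage doubles the channel count, so the final hidden size is
# embed_dim * 2 ** (num_stages - 1); with embed_dim=64 and four stages that
# is 64 * 2**3 = 512. A standalone check of the same expression:
embed_dim, depths = 64, [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 512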
| 42 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase_ = 35
lowerCamelCase_ = 2
lowerCamelCase_ = 8
lowerCamelCase_ = {
'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string )
lowerCamelCase_ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
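# --- Illustration (the .npz voice-preset round trip tested above, standalone) -
# The test saves a dict of numpy arrays with np.savez and expects the processor
# to read identical arrays back. The same round trip without Bark:
import os
import tempfile
import numpy as np

preset = {
    'semantic_prompt': np.ones(35),
    'coarse_prompt': np.ones((2, 35)),
    'fine_prompt': np.ones((8, 35)),
}
path = os.path.join(tempfile.mkdtemp(), 'file.npz')
np.savez(path, **preset)
loaded = np.load(path)
for key in preset:
    assert np.array_equal(preset[key], loaded[key])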
| 42 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'llama'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
def __init__( self , SCREAMING_SNAKE_CASE_=32000 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_=11008 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="silu" , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-6 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = num_key_value_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = initializer_range
lowerCamelCase_ = rms_norm_eps
lowerCamelCase_ = pretraining_tp
lowerCamelCase_ = use_cache
lowerCamelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'''got {self.rope_scaling}''' )
lowerCamelCase_ = self.rope_scaling.get('type' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.rope_scaling.get('factor' , SCREAMING_SNAKE_CASE_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
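# --- Illustration (the shape the validation above accepts, standalone) --------
# `rope_scaling` must be a two-field dict whose type is 'linear' or 'dynamic'
# and whose factor is a float greater than 1. A sketch of the same check
# outside the config class:
def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError('expected a dict with `type` and `factor`')
    if rope_scaling.get('type') not in ('linear', 'dynamic'):
        raise ValueError("type must be 'linear' or 'dynamic'")
    factor = rope_scaling.get('factor')
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError('factor must be a float > 1')

validate_rope_scaling({'type': 'linear', 'factor': 2.0})   # passes silently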
| 42 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ) -> Any:
    bs = (
        list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs ,cs ) )
def get_pairs( word ) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word
return word
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) )
return bpe_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ''.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
lowerCamelCase_ = 0
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
return (text, kwargs)
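# --- Illustration (one merge step of the byte-level BPE loop above) -----------
# `bpe` repeatedly merges the adjacent pair with the lowest rank. A single
# hand-rolled step on the word "lower", assuming the merge ("l", "o") exists:
word = tuple('lower')
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
assert ('l', 'o') in pairs
merged = ('lo',) + word[2:]   # apply the merge ("l", "o") -> "lo"
assert merged == ('lo', 'w', 'e', 'r')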
| 42 | 1 |
'''simple docstring'''
def _UpperCamelCase ( column_title ) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 ,power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
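# --- Illustration (a worked example of the base-26 conversion above) ----------
# "AB" maps to 1 * 26**1 + 2 * 26**0 = 28, exactly the right-to-left sum the
# function accumulates (power grows as index moves left):
assert (ord('A') - 64) * 26 + (ord('B') - 64) * 1 == 28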
| 42 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RealmTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = do_lower_case
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = text
lowerCamelCase_ = kwargs.pop('text_pair' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = kwargs.pop('return_tensors' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE_ ):
if batch_text_pair is not None:
lowerCamelCase_ = batch_text_pair[idx]
else:
lowerCamelCase_ = None
lowerCamelCase_ = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = encoded_candidates.get('input_ids' )
lowerCamelCase_ = encoded_candidates.get('attention_mask' )
lowerCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(SCREAMING_SNAKE_CASE_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(SCREAMING_SNAKE_CASE_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE_ ) != 0}
return BatchEncoding(SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
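# --- Added usage sketch (not from the original file) ---
# The batched __call__ override above pads every candidate to max_length and
# stacks the encoded fields per question. Assuming this is REALM's fast
# tokenizer and the method corresponds to `batch_encode_candidates`, a
# minimal example would be:
#
#     tokenizer = RealmTokenizerFast.from_pretrained('google/realm-cc-news-pretrained-encoder')
#     candidates = [['Hello world!', 'Nice to meet you!'], ['The cute cat.', 'The adorable dog.']]
#     batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors='pt')
#     # batch.input_ids -> shape (num_questions, num_candidates, max_length)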
| 42 | 1 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def _UpperCamelCase ( ) -> tuple[list[int], int]:
lowerCamelCase_ = [randint(-10_00 ,10_00 ) for i in range(10 )]
lowerCamelCase_ = randint(-50_00 ,50_00 )
return (arr, r)
A_ = make_dataset()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> tuple[int, ...]:
for triplet in permutations(__UpperCamelCase ,3 ):
if sum(__UpperCamelCase ) == target:
return tuple(sorted(__UpperCamelCase ) )
return (0, 0, 0)
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> tuple[int, int, int]:
arr.sort()
lowerCamelCase_ = len(__UpperCamelCase )
for i in range(n - 1 ):
lowerCamelCase_ ,lowerCamelCase_ = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
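# --- Added sanity-check sketch ---
# Under the original names triplet_sum1 / triplet_sum2 (the names imported by
# the timing strings below), both implementations should agree on a fixed
# input, e.g.:
#
#     triplet_sum1([2, 7, 4, 0, 9, 5, 1, 3], 18)  # -> (2, 7, 9)
#     triplet_sum2([2, 7, 4, 0, 9, 5, 1, 3], 18)  # -> (2, 7, 9)
#
# The sort-plus-two-pointer version runs in O(n^2) after an O(n log n) sort,
# versus the cubic permutation scan above it.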
def _UpperCamelCase ( ) -> tuple[float, float]:
lowerCamelCase_ = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
lowerCamelCase_ = '\ntriplet_sum1(*dataset)\n'
lowerCamelCase_ = '\ntriplet_sum2(*dataset)\n'
lowerCamelCase_ = repeat(setup=__UpperCamelCase ,stmt=__UpperCamelCase ,repeat=5 ,number=1_00_00 )
lowerCamelCase_ = repeat(setup=__UpperCamelCase ,stmt=__UpperCamelCase ,repeat=5 ,number=1_00_00 )
return (min(__UpperCamelCase ), min(__UpperCamelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
A_ = solution_times()
print(f'''The time for naive implementation is {times[0]}.''')
print(f'''The time for optimized implementation is {times[1]}.''')
| 42 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
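# --- Added note on the multipliers used below ---
# For a 4-digit n whose double has 5 digits, the concatenation n||2n equals
# n * 10**5 + 2 * n = 100002 * n; for a 3-digit n with 3-digit 2n and 3n,
# n||2n||3n equals n * 10**6 + 2n * 10**3 + 3n = 1002003 * n. For example
# 9327 * 100002 = 932718654, the well-known 1-9 pandigital concatenated
# product (Project Euler 38).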
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
A_ = True
except (ImportError, AttributeError):
A_ = object
def _UpperCamelCase ( *__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
pass
A_ = False
A_ = logging.get_logger("transformers-cli/serving")
def _UpperCamelCase ( __UpperCamelCase ) -> int:
lowerCamelCase_ = pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
return ServeCommand(__UpperCamelCase ,args.host ,args.port ,args.workers )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = parser.add_parser(
'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE_ , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
serve_parser.add_argument('--host' , type=SCREAMING_SNAKE_CASE_ , default='localhost' , help='Interface the server will listen on.' )
serve_parser.add_argument('--port' , type=SCREAMING_SNAKE_CASE_ , default=8888 , help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of http workers' )
serve_parser.add_argument('--model' , type=SCREAMING_SNAKE_CASE_ , help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' , type=SCREAMING_SNAKE_CASE_ , help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' , type=SCREAMING_SNAKE_CASE_ , help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' , type=SCREAMING_SNAKE_CASE_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
serve_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = pipeline
lowerCamelCase_ = host
lowerCamelCase_ = port
lowerCamelCase_ = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install "transformers[serving]". '
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f'''Serving model over {host}:{port}''' )
lowerCamelCase_ = FastAPI(
routes=[
APIRoute(
'/' , self.model_info , response_model=SCREAMING_SNAKE_CASE_ , response_class=SCREAMING_SNAKE_CASE_ , methods=['GET'] , ),
APIRoute(
'/tokenize' , self.tokenize , response_model=SCREAMING_SNAKE_CASE_ , response_class=SCREAMING_SNAKE_CASE_ , methods=['POST'] , ),
APIRoute(
'/detokenize' , self.detokenize , response_model=SCREAMING_SNAKE_CASE_ , response_class=SCREAMING_SNAKE_CASE_ , methods=['POST'] , ),
APIRoute(
'/forward' , self.forward , response_model=SCREAMING_SNAKE_CASE_ , response_class=SCREAMING_SNAKE_CASE_ , methods=['POST'] , ),
] , timeout=600 , )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
run(self._app , host=self.host , port=self.port , workers=self.workers )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = Body(SCREAMING_SNAKE_CASE_ , embed=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ = Body(SCREAMING_SNAKE_CASE_ , embed=SCREAMING_SNAKE_CASE_ ) ) -> Dict:
'''simple docstring'''
try:
lowerCamelCase_ = self._pipeline.tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
if return_ids:
lowerCamelCase_ = self._pipeline.tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
return ServeTokenizeResult(tokens=SCREAMING_SNAKE_CASE_ , tokens_ids=SCREAMING_SNAKE_CASE_ )
else:
return ServeTokenizeResult(tokens=SCREAMING_SNAKE_CASE_ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(SCREAMING_SNAKE_CASE_ )} )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = Body(SCREAMING_SNAKE_CASE_ , embed=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ = Body(SCREAMING_SNAKE_CASE_ , embed=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ = Body(SCREAMING_SNAKE_CASE_ , embed=SCREAMING_SNAKE_CASE_ ) , ) -> Tuple:
'''simple docstring'''
try:
lowerCamelCase_ = self._pipeline.tokenizer.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return ServeDeTokenizeResult(model='' , text=SCREAMING_SNAKE_CASE_ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(SCREAMING_SNAKE_CASE_ )} )
async def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=Body(SCREAMING_SNAKE_CASE_ , embed=SCREAMING_SNAKE_CASE_ ) ) -> int:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowerCamelCase_ = self._pipeline(SCREAMING_SNAKE_CASE_ )
return ServeForwardResult(output=SCREAMING_SNAKE_CASE_ )
except Exception as e:
raise HTTPException(500 , {'error': str(SCREAMING_SNAKE_CASE_ )} )
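# --- Added invocation sketch (hedged; flags match the argparse setup above) ---
#     transformers-cli serve --task text-classification --host 0.0.0.0 --port 8888 --workers 1
# After startup, clients GET / for model info and POST JSON bodies to the
# /tokenize, /detokenize and /forward routes registered above.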
| 42 |
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
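# --- Added usage sketch ---
# Assuming the validator above keeps its original public name
# is_spain_national_id:
#     is_spain_national_id('12345678Z')  # -> True
# because 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z'.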
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 | 1 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
A_ = "scheduler_config.json"
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 5
@dataclass
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = SCHEDULER_CONFIG_NAME
SCREAMING_SNAKE_CASE_ = ['dtype']
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = True
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = cls.load_config(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , return_unused_kwargs=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ ,lowerCamelCase_ = cls.from_config(SCREAMING_SNAKE_CASE_ , return_unused_kwargs=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if hasattr(SCREAMING_SNAKE_CASE_ , 'create_state' ) and getattr(SCREAMING_SNAKE_CASE_ , 'has_state' , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
self.save_config(save_directory=SCREAMING_SNAKE_CASE_ , push_to_hub=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = list(set([cls.__name__] + cls._compatibles ) )
lowerCamelCase_ = importlib.import_module(__name__.split('.' )[0] )
lowerCamelCase_ = [
getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for c in compatible_classes_str if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
return compatible_classes
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> jnp.ndarray:
assert len(__UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) ,__UpperCamelCase )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=0.999 ,__UpperCamelCase=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(__UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowerCamelCase_ = []
for i in range(__UpperCamelCase ):
lowerCamelCase_ = i / num_diffusion_timesteps
lowerCamelCase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) ,__UpperCamelCase ) )
return jnp.array(__UpperCamelCase ,dtype=__UpperCamelCase )
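# --- Added usage note ---
# betas_for_alpha_bar (the name this helper is called by further down) turns
# the squared-cosine alpha_bar(t) above into per-step betas via
#     beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# clipped at max_beta = 0.999; e.g. betas_for_alpha_bar(1000) yields a
# length-1000 jnp array of betas.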
@flax.struct.dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = scheduler.config
if config.trained_betas is not None:
lowerCamelCase_ = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCamelCase_ = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase_ = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase_ = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
lowerCamelCase_ = 1.0 - betas
lowerCamelCase_ = jnp.cumprod(SCREAMING_SNAKE_CASE_ , axis=0 )
return cls(
alphas=SCREAMING_SNAKE_CASE_ , betas=SCREAMING_SNAKE_CASE_ , alphas_cumprod=SCREAMING_SNAKE_CASE_ , )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
lowerCamelCase_ = state.alphas_cumprod
lowerCamelCase_ = alphas_cumprod[timesteps] ** 0.5
lowerCamelCase_ = sqrt_alpha_prod.flatten()
lowerCamelCase_ = broadcast_to_shape_from_left(__UpperCamelCase ,original_samples.shape )
lowerCamelCase_ = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCamelCase_ = sqrt_one_minus_alpha_prod.flatten()
lowerCamelCase_ = broadcast_to_shape_from_left(__UpperCamelCase ,original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]:
lowerCamelCase_ ,lowerCamelCase_ = get_sqrt_alpha_prod(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
lowerCamelCase_ ,lowerCamelCase_ = get_sqrt_alpha_prod(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
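# --- Added math note for the two helpers above ---
# The add-noise helper implements the forward process q(x_t | x_0):
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# and the velocity helper returns the v-prediction target
#     v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0,
# with both coefficients broadcast from shape (batch,) up to the sample shape.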
| 42 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
lowerCamelCase_ = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。 こんばんは、㔺界。'
lowerCamelCase_ = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
lowerCamelCase_ = 'こんにちは、、、、世界。こんばんは、、、、世界。'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。'
lowerCamelCase_ = 'こんばんは、㔺界。😀'
lowerCamelCase_ = 'こんにちは、世界。こんばんは、世界。😀'
lowerCamelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。'
lowerCamelCase_ = 'こんばんは、㔺界。😀'
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = tokenizer.encode('あンいワ' )
lowerCamelCase_ = tokenizer.encode('' , prefix_text='あンいワ' )
lowerCamelCase_ = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# fmt: off
lowerCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
lowerCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
| 42 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'deit'
def __init__( self , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=16 , **SCREAMING_SNAKE_CASE_ , ) -> int:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = encoder_stride
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = version.parse('1.11' )
@property
def UpperCamelCase( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase( self ) -> float:
'''simple docstring'''
return 1E-4
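# --- Added usage sketch ---
# DeiTConfig() with the defaults above reproduces the deit-base geometry:
# hidden_size 768, 12 layers x 12 heads, and 224x224 inputs split into
# (224 // 16) ** 2 = 196 patches of 16x16 pixels. The ONNX config exports
# pixel_values with dynamic (batch, num_channels, height, width) axes and a
# 1e-4 validation tolerance.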
| 42 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
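# --- Added math note for the denoising loop above ---
# Classifier-free guidance combines the two chunked noise predictions as
#     eps = eps_uncond + guidance_scale * (eps_text - eps_uncond),
# and decoding rescales latents by 1 / 0.18215 (the Stable Diffusion VAE
# scaling factor) before vae.decode.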
| 42 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _UpperCamelCase ( ) -> str:
lowerCamelCase_ = _ask_options(
'In which compute environment are you running?' ,['This machine', 'AWS (Amazon SageMaker)'] ,_convert_compute_environment ,)
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCamelCase_ = get_sagemaker_input()
else:
lowerCamelCase_ = get_cluster_input()
return config
def _UpperCamelCase ( __UpperCamelCase=None ) -> Union[str, Any]:
if subparsers is not None:
lowerCamelCase_ = subparsers.add_parser('config' ,description=__UpperCamelCase )
else:
lowerCamelCase_ = argparse.ArgumentParser('Accelerate config command' ,description=__UpperCamelCase )
parser.add_argument(
'--config_file' ,default=__UpperCamelCase ,help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) ,)
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def _UpperCamelCase ( __UpperCamelCase ) -> Tuple:
lowerCamelCase_ = get_user_input()
if args.config_file is not None:
lowerCamelCase_ = args.config_file
else:
if not os.path.isdir(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
lowerCamelCase_ = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(__UpperCamelCase )
else:
config.to_yaml_file(__UpperCamelCase )
print(f'''accelerate configuration saved at {config_file}''' )
def _UpperCamelCase ( ) -> List[Any]:
lowerCamelCase_ = config_command_parser()
lowerCamelCase_ = parser.parse_args()
config_command(__UpperCamelCase )
if __name__ == "__main__":
main()
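# --- Added invocation sketch ---
#     accelerate config --config_file /path/to/my_config.yaml
# walks through the interactive prompts above and writes the answers as YAML,
# or as JSON when the target path ends in '.json' (see the save branch).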
| 42 |
'''simple docstring'''
import pprint
import requests
A_ = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
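# --- Added response sketch (hedged; shape per the public zenquotes.io API) ---
# Both helpers return a list of dicts along the lines of
#     [{'q': '<quote text>', 'a': '<author>', 'h': '<html rendering>'}]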
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
| 42 | 1 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A_ = random.Random()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=1.0 ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> Union[str, Any]:
if rng is None:
lowerCamelCase_ = global_rng
lowerCamelCase_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=2000 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=160 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=4000 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = min_seq_length
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase_ = padding_value
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = return_attention_mask
lowerCamelCase_ = do_normalize
lowerCamelCase_ = feature_size
lowerCamelCase_ = chunk_length
lowerCamelCase_ = hop_length
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> Any:
'''simple docstring'''
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
lowerCamelCase_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = WhisperFeatureExtractor if is_speech_available() else None
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = WhisperFeatureExtractionTester(self )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE_ )[0]
check_json_file_has_correct_format(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = feat_extract_first.to_dict()
lowerCamelCase_ = feat_extract_second.to_dict()
lowerCamelCase_ = feat_extract_first.mel_filters
lowerCamelCase_ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = os.path.join(SCREAMING_SNAKE_CASE_ , 'feat_extract.json' )
feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = feat_extract_first.to_dict()
lowerCamelCase_ = feat_extract_second.to_dict()
lowerCamelCase_ = feat_extract_first.mel_filters
lowerCamelCase_ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
lowerCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test batched
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ = np.asarray(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test truncation required
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
lowerCamelCase_ = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs_truncated]
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
import torch
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = np.random.rand(100 , 32 ).astype(np.floataa )
lowerCamelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase_ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCamelCase_ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCamelCase_ = ds.sort('id' ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCamelCase( self ) -> str:
'''simple docstring'''
# fmt: off
lowerCamelCase_ = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = WhisperFeatureExtractor()
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = self._load_datasamples(1 )[0]
lowerCamelCase_ = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
lowerCamelCase_ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_ ) - 1 ) < 1E-3 ) )
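# --- Added note on the normalization test above ---
# zero_mean_unit_var_norm standardizes each array roughly as
#     x_norm = (x - mean(x)) / sqrt(var(x) + eps)
# (eps is a small stabilizing constant in the implementation), which is what
# the mean ~ 0 and |var - 1| < 1e-3 assertions verify after the deliberate
# rescale to [0, 65535].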
| 42 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
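        # Position ids must start counting from padding_idx + 1 and assign
        # padding_idx itself to pad tokens, which is what the expected tensor
        # below encodes.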
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (DDPMScheduler,)
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = {
'num_train_timesteps': 1000,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
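        # Run the full reverse-diffusion loop, denoising from t = T - 1 down to
        # 0 and feeding each step's output back in as the next sample.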
for t in reversed(range(SCREAMING_SNAKE_CASE_ ) ):
# 1. predict noise residual
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(SCREAMING_SNAKE_CASE_ ) ):
# 1. predict noise residual
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == len(SCREAMING_SNAKE_CASE_ ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
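# Map each submodule to the names it exports; _LazyModule consumes this table
# so the actual imports only run when an attribute is first accessed.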
A_ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 42 | 1 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def UpperCamelCase( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
pass
def _UpperCamelCase ( __UpperCamelCase ) -> str:
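    # Hash the raw image bytes to get a stable fingerprint for comparing
    # pipeline outputs in tests.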
    lowerCamelCase_ = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = DepthEstimationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , SCREAMING_SNAKE_CASE_ )
import datasets
lowerCamelCase_ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowerCamelCase_ = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , SCREAMING_SNAKE_CASE_ , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'Intel/dpt-large'
lowerCamelCase_ = pipeline('depth-estimation' , model=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowerCamelCase_ = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
        self.skipTest('There is no hf-internal-testing tiny model for either GLPN or DPT' )
| 42 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'xlm-prophetnet'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 30522 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 128 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = num_encoder_layers
lowerCamelCase_ = num_encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = num_decoder_layers
lowerCamelCase_ = num_decoder_attention_heads
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = init_std # Normal(0, this parameter)
lowerCamelCase_ = activation_function
# parameters for xlmprophetnet
lowerCamelCase_ = ngram
lowerCamelCase_ = num_buckets
lowerCamelCase_ = relative_max_distance
lowerCamelCase_ = disable_ngram_loss
lowerCamelCase_ = eps
# 3 Types of Dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = dropout
lowerCamelCase_ = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
| 42 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
A_ = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["BeitFeatureExtractor"]
A_ = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> float:
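    # Escape-time iteration: repeatedly apply z -> z**2 + c, written out for
    # the real part `a` and imaginary part `b`, and return the normalized
    # number of steps taken before the orbit escapes.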
lowerCamelCase_ = x
lowerCamelCase_ = y
for step in range(__UpperCamelCase ): # noqa: B007
lowerCamelCase_ = a * a - b * b + x
lowerCamelCase_ = 2 * a * b + y
lowerCamelCase_ = a_new
            # divergence happens for all complex numbers with an absolute value
            # greater than 2, i.e. once the squared magnitude a * a + b * b exceeds 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__UpperCamelCase ,1 ,1 ) )
def _UpperCamelCase ( __UpperCamelCase = 8_00 ,__UpperCamelCase = 6_00 ,__UpperCamelCase = -0.6 ,__UpperCamelCase = 0 ,__UpperCamelCase = 3.2 ,__UpperCamelCase = 50 ,__UpperCamelCase = True ,) -> Image.Image:
lowerCamelCase_ = Image.new('RGB' ,(image_width, image_height) )
lowerCamelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(__UpperCamelCase ):
for image_y in range(__UpperCamelCase ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase_ = figure_width / image_width * image_height
lowerCamelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase_ = get_distance(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase_ = get_color_coded_rgb(__UpperCamelCase )
else:
lowerCamelCase_ = get_black_and_white_rgb(__UpperCamelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
A_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 42 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> Optional[Any]:
if config_name_or_path is None:
lowerCamelCase_ = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
lowerCamelCase_ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase_ = question_encoder_name_or_path
lowerCamelCase_ = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
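    # Build the composite RAG checkpoint: attach both sub-model configs to the
    # RAG config, then instantiate the model from the two pretrained parts.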
# Save model.
lowerCamelCase_ = RagConfig.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = AutoConfig.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = AutoConfig.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = gen_config
lowerCamelCase_ = question_encoder_config
lowerCamelCase_ = model_class.from_pretrained_question_encoder_generator(
__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase )
rag_model.save_pretrained(__UpperCamelCase )
# Sanity check.
model_class.from_pretrained(__UpperCamelCase )
# Save tokenizers.
lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
A_ = parser.parse_args()
A_ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 42 |
'''simple docstring'''
from math import isclose, sqrt
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> tuple[float, float, float]:
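    # Reflect the ray off the ellipse 4x^2 + y^2 = 100: the normal at (x, y)
    # has gradient y / (4x), and sa/ca below are sin/cos of twice the normal's
    # angle, so the outgoing gradient is the incoming one mirrored about the
    # normal.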
lowerCamelCase_ = point_y / 4 / point_x
lowerCamelCase_ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCamelCase_ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCamelCase_ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCamelCase_ = outgoing_gradient**2 + 4
lowerCamelCase_ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCamelCase_ = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
lowerCamelCase_ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCamelCase_ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCamelCase_ = x_minus if isclose(__UpperCamelCase ,__UpperCamelCase ) else x_plus
lowerCamelCase_ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _UpperCamelCase ( __UpperCamelCase = 1.4 ,__UpperCamelCase = -9.6 ) -> int:
lowerCamelCase_ = 0
lowerCamelCase_ = first_x_coord
lowerCamelCase_ = first_y_coord
lowerCamelCase_ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = next_point(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 1 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
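    # Select the UNet block layout for the requested horizon, then port the
    # original checkpoint into a diffusers UNet1DModel by zipping the two
    # state dicts' keys in order and copying weights across.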
if hor == 1_28:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 64, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
lowerCamelCase_ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCamelCase_ = model.state_dict()
lowerCamelCase_ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    lowerCamelCase_ = UNet1DModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase_ = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase_ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> Tuple:
lowerCamelCase_ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
lowerCamelCase_ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
lowerCamelCase_ = model
    lowerCamelCase_ = UNet1DModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase_ = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase_ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 42 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
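    # bounds[i] is the smallest composite that passes Miller-Rabin for the
    # first i + 1 prime bases, so any n below that bound is proven prime by
    # testing only those bases; entries of 1 mark base counts whose bound
    # does not improve on the previous one.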
lowerCamelCase_ = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
lowerCamelCase_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__UpperCamelCase ,1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase_ = primes[:idx]
break
lowerCamelCase_ ,lowerCamelCase_ = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase_ = False
for r in range(__UpperCamelCase ):
lowerCamelCase_ = pow(__UpperCamelCase ,d * 2**r ,__UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase_ = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
def _UpperCamelCase ( ) -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 42 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> int:
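    # Ripple-carry addition without `+`: AND extracts the carry bits, XOR adds
    # without carry, and the carry is shifted left and folded back in until it
    # vanishes (e.g. 3 + 5 -> 6 carry 2 -> 4 carry 4 -> 0 carry 8 -> 8).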
while second != 0:
lowerCamelCase_ = first & second
first ^= second
lowerCamelCase_ = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = int(input("Enter the first number: ").strip())
A_ = int(input("Enter the second number: ").strip())
print(f'''{add(first, second) = }''')
| 42 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ = 16
A_ = 32
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 16 ,__UpperCamelCase = "bert-base-cased" ) -> List[Any]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = load_dataset('glue' ,'mrpc' )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase_ = datasets.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=__UpperCamelCase )
    # We also rename the 'label' column to 'labels', which is the name the
    # models in the transformers library expect for labels
lowerCamelCase_ = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' )
return tokenizer.pad(__UpperCamelCase ,padding='longest' ,return_tensors='pt' )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets['train'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
lowerCamelCase_ = DataLoader(
tokenized_datasets['validation'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
model.eval()
lowerCamelCase_ = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
lowerCamelCase_ ,lowerCamelCase_ = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCamelCase ) - 1:
lowerCamelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCamelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCamelCase ,references=__UpperCamelCase ,)
lowerCamelCase_ = metric.compute()
return eval_metric["accuracy"]
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[str]:
# Initialize accelerator
lowerCamelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config['lr']
lowerCamelCase_ = int(config['num_epochs'] )
lowerCamelCase_ = int(config['seed'] )
lowerCamelCase_ = int(config['batch_size'] )
lowerCamelCase_ = args.model_name_or_path
set_seed(__UpperCamelCase )
lowerCamelCase_ ,lowerCamelCase_ = get_dataloaders(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase ,return_dict=__UpperCamelCase )
# Instantiate optimizer
lowerCamelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase_ = optimizer_cls(params=model.parameters() ,lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowerCamelCase_ = 1
lowerCamelCase_ = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase ,num_warmup_steps=0 ,num_training_steps=__UpperCamelCase ,)
else:
lowerCamelCase_ = DummyScheduler(__UpperCamelCase ,total_num_steps=__UpperCamelCase ,warmup_num_steps=0 )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the
    # objects in the same order we gave them to the prepare method.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = accelerator.prepare(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase_ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase_ = 0
lowerCamelCase_ = evaluate.load('glue' ,'mrpc' )
lowerCamelCase_ = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase_ = args.resume_from_checkpoint.split('epoch_' )[1]
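        # Recover the epoch number from a checkpoint directory named
        # `epoch_<n>` by reading digits until the first non-digit character.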
lowerCamelCase_ = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase_ = int(__UpperCamelCase ) + 1
lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
accelerator.print('resumed checkpoint performance:' ,__UpperCamelCase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' ,lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' ,optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir ,f'''state_{starting_epoch-1}.json''' ) ,'r' ) as f:
lowerCamelCase_ = json.load(__UpperCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase_ = {}
for epoch in range(__UpperCamelCase ,__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.loss
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCamelCase_ = f'''epoch_{epoch}'''
lowerCamelCase_ = os.path.join(args.output_dir ,__UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = accuracy
lowerCamelCase_ = lr_scheduler.get_lr()[0]
lowerCamelCase_ = optimizer.param_groups[0]['lr']
lowerCamelCase_ = epoch
lowerCamelCase_ = overall_step
accelerator.print(f'''epoch {epoch}:''' ,__UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,f'''state_{epoch}.json''' ) ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> str:
lowerCamelCase_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' ,type=__UpperCamelCase ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=__UpperCamelCase ,)
parser.add_argument(
'--output_dir' ,type=__UpperCamelCase ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
parser.add_argument(
'--resume_from_checkpoint' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If the training should continue from a checkpoint folder.' ,)
parser.add_argument(
'--partial_train_epoch' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If passed, the training will stop after this number of epochs.' ,)
parser.add_argument(
'--num_epochs' ,type=__UpperCamelCase ,default=2 ,help='Number of train epochs.' ,)
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
main()
| 42 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = ['transformers', 'torch', 'note_seq']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 42 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> np.ndarray:
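    # A Gabor kernel is a Gaussian envelope modulated by a cosine carrier; each
    # (px, py) offset is rotated by `theta` before evaluating the formula, so
    # the filter responds most strongly to edges at that orientation.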
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
lowerCamelCase_ = ksize + 1
    lowerCamelCase_ = np.zeros((ksize, ksize) ,dtype=np.float64 )
# each value
for y in range(__UpperCamelCase ):
for x in range(__UpperCamelCase ):
# distance from center
lowerCamelCase_ = x - ksize // 2
lowerCamelCase_ = y - ksize // 2
            # degree to radian
lowerCamelCase_ = theta / 1_80 * np.pi
lowerCamelCase_ = np.cos(_theta )
lowerCamelCase_ = np.sin(_theta )
# get kernel x
lowerCamelCase_ = cos_theta * px + sin_theta * py
# get kernel y
lowerCamelCase_ = -sin_theta * px + cos_theta * py
# fill kernel
lowerCamelCase_ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
A_ = imread("../image_data/lena.jpg")
# turn image in gray scale value
A_ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
A_ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
A_ = out / out.max() * 255
A_ = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 42 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def UpperCamelCase( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
pass
def _UpperCamelCase ( __UpperCamelCase ) -> List[Any]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
A_ = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
'document-question-answering' , model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , '' ) ) )
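        # Run OCR once up front so the same word/box pairs can be passed to the
        # pipeline directly via `word_boxes` instead of re-running tesseract.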
lowerCamelCase_ = 'What is the placebo?'
lowerCamelCase_ = [
{
'image': load_image(SCREAMING_SNAKE_CASE_ ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = dqa_pipeline(SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
[
{'score': ANY(SCREAMING_SNAKE_CASE_ ), 'answer': ANY(SCREAMING_SNAKE_CASE_ ), 'start': ANY(SCREAMING_SNAKE_CASE_ ), 'end': ANY(SCREAMING_SNAKE_CASE_ )},
{'score': ANY(SCREAMING_SNAKE_CASE_ ), 'answer': ANY(SCREAMING_SNAKE_CASE_ ), 'start': ANY(SCREAMING_SNAKE_CASE_ ), 'end': ANY(SCREAMING_SNAKE_CASE_ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = 'How many cats are there?'
lowerCamelCase_ = [
{'score': 0.0_001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0_001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
lowerCamelCase_ = dqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , SCREAMING_SNAKE_CASE_ )
        # No text can be detected in this image, so layoutlmv2 should fail
        # and will most likely return an empty answer.
lowerCamelCase_ = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCamelCase_ = dqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCamelCase_ = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = dqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , words=SCREAMING_SNAKE_CASE_ , boxes=SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = 'What is the invoice number?'
lowerCamelCase_ = dqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase_ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = 'What is the invoice number?'
lowerCamelCase_ = dqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase_ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=SCREAMING_SNAKE_CASE_ , revision='3dc6de3' , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = 'What is the invoice number?'
lowerCamelCase_ = dqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowerCamelCase_ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowerCamelCase_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , '' ) ) )
# This model should also work if `image` is set to None
lowerCamelCase_ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=SCREAMING_SNAKE_CASE_ , revision='3dc6de3' , max_seq_len=50 , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = 'What is the invoice number?'
lowerCamelCase_ = dqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , '' ) ) )
# This model should also work if `image` is set to None
lowerCamelCase_ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = 'What is the invoice number?'
lowerCamelCase_ = dqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
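# A condensed usage sketch of the document-QA pipeline exercised by the tests
# above, using the same checkpoint and revision; the image path is hypothetical
# and pytesseract must be installed so OCR can run when only an image is given.
from transformers import pipeline

doc_qa = pipeline("document-question-answering", model="impira/layoutlm-document-qa", revision="3dc6de3")
predictions = doc_qa(image="invoice.png", question="What is the invoice number?", top_k=1)  # hypothetical image path
print(predictions[0]["answer"], predictions[0]["score"])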
| 42 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = ['transformers', 'torch', 'note_seq']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 42 | 1 |
'''simple docstring'''
from __future__ import annotations
# Precision for the search: once the range shrinks below this size, the
# algorithm falls back to linear search. Keeping it at 10 or above is recommended.
A_ = 10
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
for i in range(__UpperCamelCase ,__UpperCamelCase ):
if array[i] == target:
return i
return -1
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> int:
lowerCamelCase_ = 0
lowerCamelCase_ = len(__UpperCamelCase )
while left <= right:
if right - left < precision:
return lin_search(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = (left + right) // 3 + 1
lowerCamelCase_ = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCamelCase_ = one_third - 1
elif array[two_third] < target:
lowerCamelCase_ = two_third + 1
else:
lowerCamelCase_ = one_third + 1
lowerCamelCase_ = two_third - 1
else:
return -1
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
if left < right:
if right - left < precision:
return lin_search(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = (left + right) // 3 + 1
lowerCamelCase_ = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(__UpperCamelCase ,one_third - 1 ,__UpperCamelCase ,__UpperCamelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
else:
return rec_ternary_search(one_third + 1 ,two_third - 1 ,__UpperCamelCase ,__UpperCamelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = input("Enter numbers separated by comma:\n").strip()
A_ = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
A_ = int(input("Enter the number to be found in the list:\n").strip())
A_ = ite_ternary_search(collection, target)
A_ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
        print(f'''Recursive search: {target} found at positions: {resultb}''')
else:
print("Not found")
| 42 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
lowerCamelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_ , unet=self.dummy_unet , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 , return_dict=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = self.dummy_vqvae_and_unet
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(raw_audio=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , start_step=5 , steps=10 )
lowerCamelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = self.dummy_unet_condition
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = torch.rand((1, 1, 10) )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = torch_device
lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
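# A condensed usage sketch of the pretrained audio-diffusion pipeline loaded in
# the GPU test above: a single call produces both a mel-spectrogram image and
# the decoded raw audio; the output path is hypothetical.
import torch
from diffusers import DiffusionPipeline

audio_pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
output = audio_pipe(generator=torch.Generator().manual_seed(42))
output.images[0].save("spectrogram.png")  # hypothetical output path
print(output.audios[0].shape)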
| 42 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _UpperCamelCase ( __UpperCamelCase ) -> list[tuple[int, int]]:
lowerCamelCase_ = 0
lowerCamelCase_ = len(__UpperCamelCase ) # No of vertices in graph
lowerCamelCase_ = [0] * n
lowerCamelCase_ = [False] * n
def dfs(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = True
lowerCamelCase_ = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,id_ )
lowerCamelCase_ = min(low[at] ,low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCamelCase_ = min(low[at] ,low[to] )
lowerCamelCase_ = []
for i in range(__UpperCamelCase ):
if not visited[i]:
dfs(__UpperCamelCase ,-1 ,__UpperCamelCase ,id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
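# A minimal, self-contained sketch of the bridge-finding routine above: an edge
# (u, v) is a bridge exactly when low[v] -- the smallest discovery id reachable
# from v without re-crossing the tree edge -- never drops below v's own id.
def find_bridges_demo(graph):
    n = len(graph)
    ids, low = [0] * n, [0] * n
    visited = [False] * n
    bridges = []
    counter = 0

    def dfs(at, parent):
        nonlocal counter
        visited[at] = True
        ids[at] = low[at] = counter
        counter += 1
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if ids[at] < low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                low[at] = min(low[at], ids[to])

    for vertex in range(n):
        if not visited[vertex]:
            dfs(vertex, -1)
    return bridges

# A path 0-1-2 has two bridges; adding the edge 0-2 would remove both.
assert sorted(find_bridges_demo({0: [1], 1: [0, 2], 2: [1]})) == [(0, 1), (1, 2)]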
| 42 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _UpperCamelCase ( __UpperCamelCase = 8 ) -> str:
lowerCamelCase_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
    # Alternative password generator, built from the random_number,
    # random_letters and random_characters helpers defined below.
# Put your code here...
i -= len(__UpperCamelCase )
lowerCamelCase_ = i // 3
lowerCamelCase_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCamelCase_ = (
chars_incl
+ random(__UpperCamelCase ,quotient + remainder )
+ random(__UpperCamelCase ,__UpperCamelCase )
+ random(__UpperCamelCase ,__UpperCamelCase )
)
lowerCamelCase_ = list(__UpperCamelCase )
shuffle(__UpperCamelCase )
return "".join(__UpperCamelCase )
# random is a generalised function for letters, characters and numbers
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
pass # Put your code here...
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Tuple:
pass # Put your code here...
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
pass # Put your code here...
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 8 ) -> bool:
if len(__UpperCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCamelCase_ = any(char in ascii_uppercase for char in password )
lowerCamelCase_ = any(char in ascii_lowercase for char in password )
lowerCamelCase_ = any(char in digits for char in password )
lowerCamelCase_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def _UpperCamelCase ( ) -> Optional[int]:
lowerCamelCase_ = int(input('Please indicate the max length of your password: ' ).strip() )
lowerCamelCase_ = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' ,password_generator(__UpperCamelCase ) )
print(
'Alternative Password generated:' ,alternative_password_generator(__UpperCamelCase ,__UpperCamelCase ) ,)
    print('[If you are thinking of using this password, you had better save it.]' )
if __name__ == "__main__":
main()
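# A minimal, self-contained sketch of the strength check above on concrete
# inputs: a strong password needs length >= 8 plus at least one uppercase
# letter, one lowercase letter, one digit, and one punctuation character.
from string import ascii_lowercase, ascii_uppercase, digits, punctuation

def is_strong_password_demo(password, min_length=8):
    return (
        len(password) >= min_length
        and any(char in ascii_uppercase for char in password)
        and any(char in ascii_lowercase for char in password)
        and any(char in digits for char in password)
        and any(char in punctuation for char in password)
    )

assert is_strong_password_demo("Hwea7$2!") is True
assert is_strong_password_demo("hwea7$2!") is False  # missing an uppercase letter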
| 42 | 1 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> np.array:
lowerCamelCase_ = f'''{sampling_rate}'''
lowerCamelCase_ = '1'
lowerCamelCase_ = 'f32le'
lowerCamelCase_ = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(__UpperCamelCase ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase_ = ffmpeg_process.communicate(__UpperCamelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
lowerCamelCase_ = output_stream[0]
lowerCamelCase_ = np.frombuffer(__UpperCamelCase ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = "f32le" ,) -> Union[str, Any]:
lowerCamelCase_ = f'''{sampling_rate}'''
lowerCamelCase_ = '1'
if format_for_conversion == "s16le":
lowerCamelCase_ = 2
elif format_for_conversion == "f32le":
lowerCamelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase_ = platform.system()
if system == "Linux":
lowerCamelCase_ = 'alsa'
lowerCamelCase_ = 'default'
elif system == "Darwin":
lowerCamelCase_ = 'avfoundation'
lowerCamelCase_ = ':0'
elif system == "Windows":
lowerCamelCase_ = 'dshow'
lowerCamelCase_ = 'default'
lowerCamelCase_ = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowerCamelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase_ = _ffmpeg_stream(__UpperCamelCase ,__UpperCamelCase )
for item in iterator:
yield item
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = "f32le" ,) -> Any:
if stream_chunk_s is not None:
lowerCamelCase_ = stream_chunk_s
else:
lowerCamelCase_ = chunk_length_s
lowerCamelCase_ = ffmpeg_microphone(__UpperCamelCase ,__UpperCamelCase ,format_for_conversion=__UpperCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase_ = np.intaa
lowerCamelCase_ = 2
elif format_for_conversion == "f32le":
lowerCamelCase_ = np.floataa
lowerCamelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase_ = chunk_length_s / 6
lowerCamelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__UpperCamelCase ,(int, float) ):
lowerCamelCase_ = [stride_length_s, stride_length_s]
lowerCamelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase_ = datetime.datetime.now()
lowerCamelCase_ = datetime.timedelta(seconds=__UpperCamelCase )
for item in chunk_bytes_iter(__UpperCamelCase ,__UpperCamelCase ,stride=(stride_left, stride_right) ,stream=__UpperCamelCase ):
# Put everything back in numpy scale
lowerCamelCase_ = np.frombuffer(item['raw'] ,dtype=__UpperCamelCase )
lowerCamelCase_ = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
lowerCamelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = False ) -> Optional[Any]:
lowerCamelCase_ = b''
lowerCamelCase_ ,lowerCamelCase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase_ = 0
for raw in iterator:
acc += raw
if stream and len(__UpperCamelCase ) < chunk_len:
lowerCamelCase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__UpperCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase_ = (_stride_left, stride_right)
lowerCamelCase_ = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
lowerCamelCase_ = False
yield item
lowerCamelCase_ = stride_left
lowerCamelCase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__UpperCamelCase ) > stride_left:
lowerCamelCase_ = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
lowerCamelCase_ = False
yield item
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
    lowerCamelCase_ = 2**24 # 16 MB read buffer
try:
with subprocess.Popen(__UpperCamelCase ,stdout=subprocess.PIPE ,bufsize=__UpperCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase_ = ffmpeg_process.stdout.read(__UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
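# A self-contained sketch of the chunking-with-stride scheme implemented above:
# each yielded window carries (stride_left, stride_right) byte counts so a
# downstream consumer can drop the overlapped edges after running inference.
def chunk_with_stride_demo(byte_iterator, chunk_len, stride_left, stride_right):
    acc = b""
    first = True
    for raw in byte_iterator:
        acc += raw
        while len(acc) >= chunk_len:
            left = 0 if first else stride_left
            yield {"raw": acc[:chunk_len], "stride": (left, stride_right)}
            first = False
            acc = acc[chunk_len - stride_left - stride_right :]
    left = 0 if first else stride_left
    if len(acc) > left:
        yield {"raw": acc, "stride": (left, 0)}

windows = list(chunk_with_stride_demo(iter([b"abcdefghij"]), chunk_len=6, stride_left=1, stride_right=1))
assert [w["raw"] for w in windows] == [b"abcdef", b"efghij", b"ij"]
assert windows[0]["stride"] == (0, 1)  # the first window has no left overlap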
| 42 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase_ = test_metrics
@require_cpu
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
| 42 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = -1
lowerCamelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase_ = TextStreamer(SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase_ = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = -1
lowerCamelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(greedy_ids[0] )
lowerCamelCase_ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
lowerCamelCase_ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
lowerCamelCase_ = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = -1
lowerCamelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase_ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_prompt=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase_ = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('distilgpt2' )
lowerCamelCase_ = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = -1
lowerCamelCase_ = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase_ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase_ = cs.out[:-1] # Remove the final "\n"
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = -1
lowerCamelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ , timeout=0.001 )
lowerCamelCase_ = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
lowerCamelCase_ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
        # The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''
for new_text in streamer:
streamer_text += new_text
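# A condensed usage sketch of the iterator-streamer pattern exercised above:
# generation runs on a background thread while the main thread consumes the
# decoded text pieces as they arrive (tiny test checkpoint as in the tests).
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

demo_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
demo_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
demo_inputs = demo_tokenizer("Hello", return_tensors="pt")
demo_streamer = TextIteratorStreamer(demo_tokenizer, skip_prompt=True)
generation_thread = Thread(
    target=demo_model.generate,
    kwargs={**demo_inputs, "max_new_tokens": 5, "do_sample": False, "streamer": demo_streamer},
)
generation_thread.start()
generated_text = "".join(demo_streamer)  # blocks until generation finishes
generation_thread.join()
print(generated_text)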
| 42 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
if hor == 1_28:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 64, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
lowerCamelCase_ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCamelCase_ = model.state_dict()
lowerCamelCase_ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
lowerCamelCase_ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase_ = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase_ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> Tuple:
lowerCamelCase_ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
lowerCamelCase_ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
lowerCamelCase_ = model
lowerCamelCase_ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase_ = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase_ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
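# A tiny self-contained sketch of the rename-by-zip trick used above: pair the
# old and new state-dict keys positionally and rebuild the dict under the new
# names. Note this silently relies on both dicts having the same key order.
old_state = {"blocks.0.w": 1, "blocks.1.w": 2}
new_keys = ["down.0.weight", "down.1.weight"]
key_mapping = dict(zip(old_state.keys(), new_keys))
renamed_state = {key_mapping[k]: v for k, v in old_state.items()}
assert renamed_state == {"down.0.weight": 1, "down.1.weight": 2}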
| 42 | 1 |
'''simple docstring'''
from collections import defaultdict
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
lowerCamelCase_ = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(SCREAMING_SNAKE_CASE_ ) )
]
lowerCamelCase_ = defaultdict(SCREAMING_SNAKE_CASE_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
lowerCamelCase_ = (1 << len(SCREAMING_SNAKE_CASE_ )) - 1
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
if mask == self.final_mask:
return 1
        # if no tasks remain and not every person has been assigned one, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
lowerCamelCase_ = self.count_ways_until(SCREAMING_SNAKE_CASE_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
lowerCamelCase_ = total_ways_util
return self.dp[mask][task_no]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
for j in task_performed[i]:
self.task[j].append(SCREAMING_SNAKE_CASE_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
A_ = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
A_ = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
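# A tiny self-contained check of the bitmask counting idea above: with person 0
# able to do task 1 and person 1 able to do tasks 1 or 2, the only complete
# assignment is (person 0 -> task 1, person 1 -> task 2), so the count is 1.
def count_assignments_demo(num_people, total_tasks, task_performed):
    final_mask = (1 << num_people) - 1
    task_to_people = {}
    for person, tasks in enumerate(task_performed):
        for task in tasks:
            task_to_people.setdefault(task, []).append(person)

    def ways(mask, task_no):
        if mask == final_mask:
            return 1
        if task_no > total_tasks:
            return 0
        total = ways(mask, task_no + 1)  # leave this task unassigned
        for person in task_to_people.get(task_no, []):
            if not mask & (1 << person):
                total += ways(mask | (1 << person), task_no + 1)
        return total

    return ways(0, 1)  # no memoization here; fine for this tiny input

assert count_assignments_demo(2, 2, [[1], [1, 2]]) == 1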
| 42 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ = 250_004
A_ = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE_ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase_ = 1
return cls
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
lowerCamelCase_ = targets['input_ids']
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
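# A condensed sketch of the language handling exercised above: the tokenizer
# appends [</s>, src_lang code] to the source ids, and the target language id
# is later passed to generate() as forced_bos_token_id.
from transformers import MBartTokenizer

demo_mbart_tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
demo_ids = demo_mbart_tok("UN Chief Says There Is No Military Solution in Syria").input_ids
assert demo_ids[-2:] == [2, 250004]  # [</s>, en_XX]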
| 42 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'dandelin/vilt-b32-finetuned-vqa'
SCREAMING_SNAKE_CASE_ = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
SCREAMING_SNAKE_CASE_ = 'image_qa'
SCREAMING_SNAKE_CASE_ = AutoProcessor
SCREAMING_SNAKE_CASE_ = AutoModelForVisualQuestionAnswering
SCREAMING_SNAKE_CASE_ = ['image', 'text']
SCREAMING_SNAKE_CASE_ = ['text']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['vision'] )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
return self.pre_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
with torch.no_grad():
return self.model(**SCREAMING_SNAKE_CASE_ ).logits
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
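# A minimal sketch of what the tool above does under the hood, on the same
# checkpoint: preprocess image + question, run the VQA head, then map the
# argmax logit to its label. The image path is hypothetical.
import torch
from PIL import Image
from transformers import AutoModelForVisualQuestionAnswering, AutoProcessor

vqa_processor = AutoProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
vqa_model = AutoModelForVisualQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
vqa_image = Image.open("photo.jpg")  # hypothetical local image
vqa_inputs = vqa_processor(vqa_image, "How many cats are there?", return_tensors="pt")
with torch.no_grad():
    vqa_logits = vqa_model(**vqa_inputs).logits
print(vqa_model.config.id2label[vqa_logits.argmax(-1).item()])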
| 42 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'ylacombe/bark-small'
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = 'en_speaker_1'
lowerCamelCase_ = 'This is a test string'
lowerCamelCase_ = 'speaker_embeddings_path.json'
lowerCamelCase_ = 'speaker_embeddings'
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase_ = 35
lowerCamelCase_ = 2
lowerCamelCase_ = 8
lowerCamelCase_ = {
'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string )
lowerCamelCase_ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
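# A minimal sketch of the voice-preset layout the processor tests above build:
# a 1-D semantic prompt plus 2 coarse and 8 fine codebook rows, saved as .npz
# so it can also be handed to the processor by file path (path is hypothetical).
import numpy as np

demo_seq_len = 35
demo_voice_preset = {
    "semantic_prompt": np.ones(demo_seq_len),
    "coarse_prompt": np.ones((2, demo_seq_len)),
    "fine_prompt": np.ones((8, demo_seq_len)),
}
np.savez("voice_preset.npz", **demo_voice_preset)  # hypothetical path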
| 42 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = None
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = os.path.join(SCREAMING_SNAKE_CASE_ , 'feat_extract.json' )
feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE_ )[0]
check_json_file_has_correct_format(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class()
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
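# A condensed sketch of the save/load round trip the mixin tests above check,
# instantiated with a concrete feature extractor class as an example.
import tempfile
from transformers import Wav2Vec2FeatureExtractor

demo_feat_extract = Wav2Vec2FeatureExtractor()
with tempfile.TemporaryDirectory() as demo_tmpdir:
    demo_feat_extract.save_pretrained(demo_tmpdir)
    demo_reloaded = Wav2Vec2FeatureExtractor.from_pretrained(demo_tmpdir)
assert demo_reloaded.to_dict() == demo_feat_extract.to_dict()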
| 42 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = (
list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase ,__UpperCamelCase ) )
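# A quick self-contained check of the byte-to-unicode table built just above:
# printable bytes map to themselves, while the remaining byte values are
# shifted past 255 so every byte becomes a visible, non-control character.
demo_bs = (
    list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
demo_cs = demo_bs[:]
demo_n = 0
for demo_b in range(2**8):
    if demo_b not in demo_bs:
        demo_bs.append(demo_b)
        demo_cs.append(2**8 + demo_n)
        demo_n += 1
demo_table = dict(zip(demo_bs, (chr(c) for c in demo_cs)))
assert demo_table[ord("A")] == "A"  # printable ASCII is the identity
assert demo_table[ord(" ")] == "Ġ"  # space becomes U+0120, the familiar BPE marker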
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word
return word
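# Added sketch of one merge step (mine, not in the original file): given a
# merge table that ranks ('l', 'o') highest, bpe('low') walks
# ('l', 'o', 'w') -> ('lo', 'w') and keeps merging until no ranked bigram
# remains; the final symbols are joined with spaces and cached.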
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) )
return bpe_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ''.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
lowerCamelCase_ = 0
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
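# Worked example (added): for a single 3-token sequence this returns
# [1, 0, 0, 0, 1], where 1 marks the <s>/</s> specials and 0 the regular
# tokens.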
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
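# Hedged usage sketch (added; assumes the original LongformerTokenizer name
# that this obfuscated class corresponds to):
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tok("Hello world")  # input_ids come back wrapped as <s> ... </s>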
| 42 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'visual_bert'
def __init__( self , SCREAMING_SNAKE_CASE_=30522 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = visual_embedding_dim
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = bypass_transformer
lowerCamelCase_ = special_visual_initialize
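# Hedged usage sketch (added; assumes the original VisualBertConfig name):
# VisualBertConfig() builds a BERT-base-sized text encoder plus a separate
# `visual_embedding_dim` for the projected image features; the uclanlp
# checkpoints listed above override these defaults per task.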
| 42 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RealmTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = do_lower_case
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = text
lowerCamelCase_ = kwargs.pop('text_pair' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = kwargs.pop('return_tensors' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE_ ):
if batch_text_pair is not None:
lowerCamelCase_ = batch_text_pair[idx]
else:
lowerCamelCase_ = None
lowerCamelCase_ = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = encoded_candidates.get('input_ids' )
lowerCamelCase_ = encoded_candidates.get('attention_mask' )
lowerCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(SCREAMING_SNAKE_CASE_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(SCREAMING_SNAKE_CASE_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE_ ) != 0}
return BatchEncoding(SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
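# Hedged usage sketch (added): this batches candidate lists, e.g.
#   tokenizer([["candidate 1", "candidate 2"]], max_length=10,
#             return_tensors="pt")
# Every candidate is padded to max_length, so each key stacks into a
# (batch, num_candidates, max_length) tensor.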
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 42 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 42 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
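# Worked check (added for clarity): for a 4-digit n, the concatenated product
# of n with (1, 2) is n followed by 2n, i.e. n * 10**5 + 2 * n == 100002 * n,
# which is what the first loop builds. For n = 9327 this gives 932718654,
# a number using each digit 1-9 exactly once.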
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 1 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = 0
lowerCamelCase_ = [0]
lowerCamelCase_ = [0]
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 0 )
lowerCamelCase_ = [60]
lowerCamelCase_ = [10]
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 0 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = 3
lowerCamelCase_ = [1, 2, 3]
lowerCamelCase_ = [3, 2, 1]
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 5 )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = 50
lowerCamelCase_ = [60, 100, 120]
lowerCamelCase_ = [10, 20, 30]
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 220 )
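# (Added note: the optimum takes the 100- and 120-value items, whose
# weights 20 + 30 fill the capacity of 50 exactly, hence 220.)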
if __name__ == "__main__":
unittest.main()
| 42 |
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
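# Worked example (added): for "12345678Z", 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == 'Z', so the ID validates; any other trailing letter
# would be rejected.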
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 | 1 |
'''simple docstring'''
import copy
import re
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'hp'
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = prefix
lowerCamelCase_ = defaults
cls.build_naming_info()
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return ""
lowerCamelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 ):
lowerCamelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowerCamelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''
while integer != 0:
lowerCamelCase_ = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
lowerCamelCase_ = 0
while True:
lowerCamelCase_ = word + '#' + int_to_alphabetic(SCREAMING_SNAKE_CASE_ )
if sword in info["reverse_short_word"]:
continue
else:
lowerCamelCase_ = sword
break
lowerCamelCase_ = short_word
lowerCamelCase_ = word
return short_word
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = param_name.split('_' )
lowerCamelCase_ = [TrialShortNamer.shortname_for_word(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for word in words]
# We try to create a separator-less short name, but if there is a collision we have to fall back
# to a separated short name
lowerCamelCase_ = ['', '_']
for separator in separators:
lowerCamelCase_ = separator.join(SCREAMING_SNAKE_CASE_ )
if shortname not in info["reverse_short_param"]:
lowerCamelCase_ = shortname
lowerCamelCase_ = param_name
return shortname
return param_name
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = TrialShortNamer.shortname_for_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = short_name
lowerCamelCase_ = param_name
@classmethod
def UpperCamelCase( cls ) -> str:
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
lowerCamelCase_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
lowerCamelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = info
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
lowerCamelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowerCamelCase_ = cls.NAMING_INFO['short_param'][k]
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1 if v else 0
lowerCamelCase_ = '' if isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ) else '-'
lowerCamelCase_ = f'''{key}{sep}{v}'''
name.append(SCREAMING_SNAKE_CASE_ )
return "_".join(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowerCamelCase_ = []
else:
lowerCamelCase_ = repr.split('_' )
lowerCamelCase_ = {}
for value in values:
if "-" in value:
lowerCamelCase_ ,lowerCamelCase_ = value.split('-' )
else:
lowerCamelCase_ = re.sub('[0-9.]' , '' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = float(re.sub('[^0-9.]' , '' , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = cls.NAMING_INFO['reverse_short_param'][p_k]
lowerCamelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowerCamelCase_ = cls.DEFAULTS[k]
return parameters
| 42 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
lowerCamelCase_ = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。 こんばんは、㔺界。'
lowerCamelCase_ = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
lowerCamelCase_ = 'こんにちは、、、、世界。こんばんは、、、、世界。'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。'
lowerCamelCase_ = 'こんばんは、㔺界。😀'
lowerCamelCase_ = 'こんにちは、世界。こんばんは、世界。😀'
lowerCamelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。'
lowerCamelCase_ = 'こんばんは、㔺界。😀'
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = tokenizer.encode('あンいワ' )
lowerCamelCase_ = tokenizer.encode('' , prefix_text='あンいワ' )
lowerCamelCase_ = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# fmt: off
lowerCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
lowerCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
| 42 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.get_dummy_input()
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ) -> int:
'''simple docstring'''
lowerCamelCase_ = 4
lowerCamelCase_ = 32
lowerCamelCase_ = (32, 32)
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = torch.device(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = (batch_size, num_channels) + sizes
lowerCamelCase_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {'hidden_states': hidden_states}
if include_temb:
lowerCamelCase_ = 128
lowerCamelCase_ = randn_tensor((batch_size, temb_channels) , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
if include_res_hidden_states_tuple:
lowerCamelCase_ = torch.manual_seed(1 )
lowerCamelCase_ = (randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ),)
if include_encoder_hidden_states:
lowerCamelCase_ = floats_tensor((batch_size, 32, 32) ).to(SCREAMING_SNAKE_CASE_ )
if include_skip_sample:
lowerCamelCase_ = randn_tensor(((batch_size, 3) + sizes) , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
return dummy_input
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
lowerCamelCase_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
lowerCamelCase_ = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase_ = self.block_class(**SCREAMING_SNAKE_CASE_ )
unet_block.to(SCREAMING_SNAKE_CASE_ )
unet_block.eval()
with torch.no_grad():
lowerCamelCase_ = unet_block(**SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = output[0]
self.assertEqual(output.shape , self.output_shape )
lowerCamelCase_ = output[0, -1, -3:, -3:]
lowerCamelCase_ = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(output_slice.flatten() , SCREAMING_SNAKE_CASE_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase_ = self.block_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = output[0]
lowerCamelCase_ = torch.device(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = randn_tensor(output.shape , device=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
loss.backward()
| 42 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type as `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
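# Added note: this is the standard classifier-free guidance update; e.g.
# guidance_scale = 7.5 moves 7.5x along the direction from the unconditional
# to the text-conditioned prediction (scale 1.0 recovers the conditional output).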
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 42 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A_ = get_tests_dir("fixtures")
A_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
A_ = get_tests_dir("fixtures/dummy-config.json")
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = 0
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ).to_dict()
config_dict.pop('feature_extractor_type' )
lowerCamelCase_ = WavaVecaFeatureExtractor(**SCREAMING_SNAKE_CASE_ )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# make sure private variable is not incorrectly saved
lowerCamelCase_ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ , revision='aaaaaa' )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('custom' , SCREAMING_SNAKE_CASE_ )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = True
try:
AutoConfig.register('custom' , SCREAMING_SNAKE_CASE_ )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# If remote code is not set, the default is to use local
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(SCREAMING_SNAKE_CASE_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 42 |
'''simple docstring'''
import pprint
import requests
A_ = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
| 42 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = CTRLTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = 'adapt react readapt apt'
lowerCamelCase_ = 'adapt react readapt apt'
return input_text, output_text
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = 'adapt react readapt apt'
lowerCamelCase_ = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
| 42 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 | 1 |
'''simple docstring'''
from __future__ import annotations
A_ = 10
def _UpperCamelCase ( __UpperCamelCase ) -> list[int]:
lowerCamelCase_ = 1
lowerCamelCase_ = max(__UpperCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase_ = [[] for _ in range(__UpperCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase_ = int((i / placement) % RADIX )
buckets[tmp].append(__UpperCamelCase )
# put each buckets' contents into list_of_ints
lowerCamelCase_ = 0
for b in range(__UpperCamelCase ):
for i in buckets[b]:
lowerCamelCase_ = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
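    # Hedged usage sketch (added): with the function's original radix_sort name,
    # RADIX = 10 buckets values by decimal digit from the least significant digit
    # up, so the call below would return the inputs in ascending order.
    # print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
    # -> [2, 24, 45, 66, 75, 90, 170, 802]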
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
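    # Hedged note (added): with the mapping above, _LazyModule defers the heavy
    # torch/tf/flax imports until an attribute is first touched, e.g.:
    # from transformers.models.resnet import ResNetModel  # resolved lazily here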
| 42 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=0.6 , SCREAMING_SNAKE_CASE_=None , ) -> Any:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = mask_ratio
lowerCamelCase_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase_ = (image_size // patch_size) ** 2
lowerCamelCase_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = ViTMAEModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = (self.image_size // self.patch_size) ** 2
lowerCamelCase_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = ViTMAEModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
np.random.seed(2 )
lowerCamelCase_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCamelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase_ = pt_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs[0].cpu().numpy()
lowerCamelCase_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Make sure we don't have nans
lowerCamelCase_ = after_outputs[0].cpu().numpy()
lowerCamelCase_ = 0
lowerCamelCase_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE_ , 1E-5 )
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ViTMAEModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> Optional[int]:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
np.random.seed(2 )
lowerCamelCase_ = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase_ = ViTMAEConfig()
lowerCamelCase_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ , noise=torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ ) )
# verify the logits
lowerCamelCase_ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(SCREAMING_SNAKE_CASE_ ) , atol=1E-4 ) )
| 42 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'xlm-prophetnet'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 30522 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 128 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = num_encoder_layers
lowerCamelCase_ = num_encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = num_decoder_layers
lowerCamelCase_ = num_decoder_attention_heads
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = init_std # Normal(0, this parameter)
lowerCamelCase_ = activation_function
# parameters for xlmprophetnet
lowerCamelCase_ = ngram
lowerCamelCase_ = num_buckets
lowerCamelCase_ = relative_max_distance
lowerCamelCase_ = disable_ngram_loss
lowerCamelCase_ = eps
# 3 Types of Dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = dropout
lowerCamelCase_ = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
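# Hedged usage sketch (added): under the class's original XLMProphetNetConfig
# name, the read-only property above sums the encoder and decoder depths.
# config = XLMProphetNetConfig()
# assert config.num_hidden_layers == config.num_encoder_layers + config.num_decoder_layers  # 12 + 12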
| 42 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
A_ = None
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
A_ = {
"google/rembert": 256,
}
A_ = "▁"
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RemBertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , remove_space=SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE_ ) )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
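# Note (added): per the methods above, a single sequence is encoded as
# [CLS] A [SEP] and a pair as [CLS] A [SEP] B [SEP], with token_type_ids of all
# zeros over the first segment and all ones over the second.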
| 42 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> float:
lowerCamelCase_ = x
lowerCamelCase_ = y
for step in range(__UpperCamelCase ): # noqa: B007
lowerCamelCase_ = a * a - b * b + x
lowerCamelCase_ = 2 * a * b + y
lowerCamelCase_ = a_new
        # divergence happens for every complex number with an absolute value
        # greater than 2, i.e. as soon as a * a + b * b exceeds 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__UpperCamelCase ,1 ,1 ) )
def _UpperCamelCase ( __UpperCamelCase = 8_00 ,__UpperCamelCase = 6_00 ,__UpperCamelCase = -0.6 ,__UpperCamelCase = 0 ,__UpperCamelCase = 3.2 ,__UpperCamelCase = 50 ,__UpperCamelCase = True ,) -> Image.Image:
lowerCamelCase_ = Image.new('RGB' ,(image_width, image_height) )
lowerCamelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(__UpperCamelCase ):
for image_y in range(__UpperCamelCase ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase_ = figure_width / image_width * image_height
lowerCamelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase_ = get_distance(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase_ = get_color_coded_rgb(__UpperCamelCase )
else:
lowerCamelCase_ = get_black_and_white_rgb(__UpperCamelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
A_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 42 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _UpperCamelCase ( __UpperCamelCase ) -> list[str]:
lowerCamelCase_ = []
lowerCamelCase_ = 11
lowerCamelCase_ = int('1' + '0' * digit_len )
for num in range(__UpperCamelCase ,__UpperCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__UpperCamelCase ,__UpperCamelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
lowerCamelCase_ = 10
return solutions
def _UpperCamelCase ( __UpperCamelCase = 2 ) -> int:
lowerCamelCase_ = 1.0
for fraction in fraction_list(__UpperCamelCase ):
lowerCamelCase_ = Fraction(__UpperCamelCase )
result *= frac.denominator / frac.numerator
return int(__UpperCamelCase )
if __name__ == "__main__":
print(solution())
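    # Worked example (added): 49/98 "cancels" the shared 9 to give 4/8 = 1/2,
    # which equals 49/98, so it is one of the four non-trivial curious fractions
    # (16/64, 19/95, 26/65, 49/98); their product reduces to 1/100, hence
    # solution() == 100.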
| 42 |
'''simple docstring'''
from math import isclose, sqrt
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> tuple[float, float, float]:
lowerCamelCase_ = point_y / 4 / point_x
lowerCamelCase_ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCamelCase_ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCamelCase_ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCamelCase_ = outgoing_gradient**2 + 4
lowerCamelCase_ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCamelCase_ = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
lowerCamelCase_ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCamelCase_ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCamelCase_ = x_minus if isclose(__UpperCamelCase ,__UpperCamelCase ) else x_plus
lowerCamelCase_ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _UpperCamelCase ( __UpperCamelCase = 1.4 ,__UpperCamelCase = -9.6 ) -> int:
lowerCamelCase_ = 0
lowerCamelCase_ = first_x_coord
lowerCamelCase_ = first_y_coord
lowerCamelCase_ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = next_point(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
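    # Note (added): the normal gradient y / (4x) follows from differentiating
    # the ellipse 4x^2 + y^2 = 100 (tangent slope -4x / y), and the outgoing
    # gradient is the incoming slope reflected about that normal via the tangent
    # double-angle identity. For the default start point, the published answer
    # to Project Euler 144 is 354 reflections.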
| 42 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'philschmid/bart-large-cnn-samsum'
SCREAMING_SNAKE_CASE_ = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
SCREAMING_SNAKE_CASE_ = 'summarizer'
SCREAMING_SNAKE_CASE_ = AutoTokenizer
SCREAMING_SNAKE_CASE_ = AutoModelForSeqaSeqLM
SCREAMING_SNAKE_CASE_ = ['text']
SCREAMING_SNAKE_CASE_ = ['text']
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.pre_processor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' , truncation=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.model.generate(**SCREAMING_SNAKE_CASE_ )[0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
return self.pre_processor.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
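# Hedged usage sketch (added): PipelineTool subclasses are callable end to end
# (encode -> forward -> decode); the original class name is assumed here.
# summarizer = TextSummarizationTool()
# print(summarizer("A long English article to be condensed into a few sentences."))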
| 42 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
lowerCamelCase_ = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
lowerCamelCase_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__UpperCamelCase ,1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase_ = primes[:idx]
break
lowerCamelCase_ ,lowerCamelCase_ = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase_ = False
for r in range(__UpperCamelCase ):
lowerCamelCase_ = pow(__UpperCamelCase ,d * 2**r ,__UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase_ = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the loop above never evaluated to true,
        # and n MUST be composite
return False
return True
def _UpperCamelCase ( ) -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
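    # Hedged usage sketch (added), using the function's original miller_rabin
    # name: the test is deterministic below 3_317_044_064_679_887_385_961_981
    # and needs allow_probable=True beyond that bound.
    # print(miller_rabin(2**61 - 1))  # True: the Mersenne prime M61
    # print(miller_rabin(2**61 + 1))  # False: divisible by 3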
| 42 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase_ = test_metrics
@require_cpu
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
| 42 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ = 16
A_ = 32
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 16 ,__UpperCamelCase = "bert-base-cased" ) -> List[Any]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = load_dataset('glue' ,'mrpc' )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase_ = datasets.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' )
return tokenizer.pad(__UpperCamelCase ,padding='longest' ,return_tensors='pt' )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets['train'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
lowerCamelCase_ = DataLoader(
tokenized_datasets['validation'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
model.eval()
lowerCamelCase_ = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
lowerCamelCase_ ,lowerCamelCase_ = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCamelCase ) - 1:
lowerCamelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCamelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCamelCase ,references=__UpperCamelCase ,)
lowerCamelCase_ = metric.compute()
return eval_metric["accuracy"]
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[str]:
# Initialize accelerator
lowerCamelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config['lr']
lowerCamelCase_ = int(config['num_epochs'] )
lowerCamelCase_ = int(config['seed'] )
lowerCamelCase_ = int(config['batch_size'] )
lowerCamelCase_ = args.model_name_or_path
set_seed(__UpperCamelCase )
lowerCamelCase_ ,lowerCamelCase_ = get_dataloaders(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase ,return_dict=__UpperCamelCase )
# Instantiate optimizer
lowerCamelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase_ = optimizer_cls(params=model.parameters() ,lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowerCamelCase_ = 1
lowerCamelCase_ = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase ,num_warmup_steps=0 ,num_training_steps=__UpperCamelCase ,)
else:
lowerCamelCase_ = DummyScheduler(__UpperCamelCase ,total_num_steps=__UpperCamelCase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = accelerator.prepare(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase_ = 0
    # We also need to keep track of the starting epoch so files are named properly
lowerCamelCase_ = 0
lowerCamelCase_ = evaluate.load('glue' ,'mrpc' )
lowerCamelCase_ = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase_ = args.resume_from_checkpoint.split('epoch_' )[1]
lowerCamelCase_ = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase_ = int(__UpperCamelCase ) + 1
lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
accelerator.print('resumed checkpoint performance:' ,__UpperCamelCase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' ,lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' ,optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir ,f'''state_{starting_epoch-1}.json''' ) ,'r' ) as f:
lowerCamelCase_ = json.load(__UpperCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase_ = {}
for epoch in range(__UpperCamelCase ,__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.loss
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCamelCase_ = f'''epoch_{epoch}'''
lowerCamelCase_ = os.path.join(args.output_dir ,__UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = accuracy
lowerCamelCase_ = lr_scheduler.get_lr()[0]
lowerCamelCase_ = optimizer.param_groups[0]['lr']
lowerCamelCase_ = epoch
lowerCamelCase_ = overall_step
accelerator.print(f'''epoch {epoch}:''' ,__UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,f'''state_{epoch}.json''' ) ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> str:
lowerCamelCase_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' ,type=__UpperCamelCase ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=__UpperCamelCase ,)
parser.add_argument(
'--output_dir' ,type=__UpperCamelCase ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
parser.add_argument(
'--resume_from_checkpoint' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If the training should continue from a checkpoint folder.' ,)
parser.add_argument(
'--partial_train_epoch' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If passed, the training will stop after this number of epochs.' ,)
parser.add_argument(
'--num_epochs' ,type=__UpperCamelCase ,default=2 ,help='Number of train epochs.' ,)
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
main()
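    # Hedged launch sketch (added): this script is meant to run under the
    # `accelerate launch` CLI with a DeepSpeed config; the flags mirror the
    # argparse options defined above.
    # accelerate launch --use_deepspeed this_script.py \
    #     --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ckpts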
| 42 | 1 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _UpperCamelCase ( __UpperCamelCase = 8 ) -> str:
lowerCamelCase_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
    # Password generator built from the random_number, random_letters, and
    # random_characters helper functions below
# Put your code here...
i -= len(__UpperCamelCase )
lowerCamelCase_ = i // 3
lowerCamelCase_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCamelCase_ = (
chars_incl
+ random(__UpperCamelCase ,quotient + remainder )
+ random(__UpperCamelCase ,__UpperCamelCase )
+ random(__UpperCamelCase ,__UpperCamelCase )
)
lowerCamelCase_ = list(__UpperCamelCase )
shuffle(__UpperCamelCase )
return "".join(__UpperCamelCase )
# random is a generalised function for letters, characters and numbers
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
pass # Put your code here...
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Tuple:
pass # Put your code here...
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
pass # Put your code here...
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 8 ) -> bool:
if len(__UpperCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCamelCase_ = any(char in ascii_uppercase for char in password )
lowerCamelCase_ = any(char in ascii_lowercase for char in password )
lowerCamelCase_ = any(char in digits for char in password )
lowerCamelCase_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def _UpperCamelCase ( ) -> Optional[int]:
lowerCamelCase_ = int(input('Please indicate the max length of your password: ' ).strip() )
lowerCamelCase_ = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' ,password_generator(__UpperCamelCase ) )
print(
'Alternative Password generated:' ,alternative_password_generator(__UpperCamelCase ,__UpperCamelCase ) ,)
print('[If you are thinking of using this passsword, You better save it.]' )
if __name__ == "__main__":
main()
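    # Hedged example (added): the final bool helper above requires length >= 8
    # plus at least one uppercase, lowercase, digit and punctuation character,
    # so a password such as "Abc123!?" would pass all five criteria.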
| 42 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> np.ndarray:
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
lowerCamelCase_ = ksize + 1
lowerCamelCase_ = np.zeros((ksize, ksize) ,dtype=np.floataa )
# each value
for y in range(__UpperCamelCase ):
for x in range(__UpperCamelCase ):
# distance from center
lowerCamelCase_ = x - ksize // 2
lowerCamelCase_ = y - ksize // 2
# degree to radiant
lowerCamelCase_ = theta / 1_80 * np.pi
lowerCamelCase_ = np.cos(_theta )
lowerCamelCase_ = np.sin(_theta )
# get kernel x
lowerCamelCase_ = cos_theta * px + sin_theta * py
# get kernel y
lowerCamelCase_ = -sin_theta * px + cos_theta * py
# fill kernel
lowerCamelCase_ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
A_ = imread("../image_data/lena.jpg")
# turn image in gray scale value
A_ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
A_ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
A_ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
A_ = out / out.max() * 255
A_ = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
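    # Note (added): each kernel entry above is exp(-(x'^2 + gamma^2 * y'^2) /
    # (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi), with (x', y') the pixel
    # offset rotated by theta; at the kernel centre the Gaussian term is 1, so
    # with psi = 0 every kernel peaks at exactly 1.0.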
| 42 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt"}
A_ = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
A_ = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
A_ = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ConvBertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> int:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
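        # rebuild the backend normalizer if its saved options (lowercasing,
        # accent stripping, CJK handling) differ from the arguments passed here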
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = do_lower_case
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 42 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = ['transformers', 'torch', 'note_seq']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 42 | 1 |
'''simple docstring'''
import json
import sys
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
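    # load benchmark results from a JSON file and render them as a collapsible
    # markdown section with one table per benchmark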
with open(__UpperCamelCase ,encoding='utf-8' ) as f:
lowerCamelCase_ = json.load(__UpperCamelCase )
lowerCamelCase_ = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(__UpperCamelCase ):
lowerCamelCase_ = results[benchmark_name]
lowerCamelCase_ = benchmark_name.split('/' )[-1]
output_md.append(f'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase_ = '| metric |'
lowerCamelCase_ = '|--------|'
lowerCamelCase_ = '| new / old (diff) |'
for metric_name in sorted(__UpperCamelCase ):
lowerCamelCase_ = benchmark_res[metric_name]
lowerCamelCase_ = metric_vals['new']
lowerCamelCase_ = metric_vals.get('old' ,__UpperCamelCase )
lowerCamelCase_ = metric_vals.get('diff' ,__UpperCamelCase )
lowerCamelCase_ = f''' {new_val:f}''' if isinstance(__UpperCamelCase ,(int, float) ) else 'None'
if old_val is not None:
val_str += f''' / {old_val:f}''' if isinstance(__UpperCamelCase ,(int, float) ) else "None"
if dif_val is not None:
val_str += f''' ({dif_val:f})''' if isinstance(__UpperCamelCase ,(int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as f:
f.writelines('\n'.join(__UpperCamelCase ) )
if __name__ == "__main__":
A_ = sys.argv[1]
A_ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 42 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
lowerCamelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_ , unet=self.dummy_unet , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 , return_dict=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = self.dummy_vqvae_and_unet
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(raw_audio=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , start_step=5 , steps=10 )
lowerCamelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = self.dummy_unet_condition
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = torch.rand((1, 1, 10) )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = torch_device
lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 42 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
A_ = logging.get_logger(__name__)
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = ['input_features', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=16000 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=25 , SCREAMING_SNAKE_CASE_="hamming_window" , SCREAMING_SNAKE_CASE_=32_768.0 , SCREAMING_SNAKE_CASE_=0.97 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> int:
'''simple docstring'''
super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = feature_size
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = padding_value
lowerCamelCase_ = hop_length
lowerCamelCase_ = win_length
lowerCamelCase_ = frame_signal_scale
lowerCamelCase_ = preemphasis_coeff
lowerCamelCase_ = mel_floor
lowerCamelCase_ = normalize_means
lowerCamelCase_ = normalize_vars
lowerCamelCase_ = win_function
lowerCamelCase_ = return_attention_mask
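        # convert the window and hop lengths from milliseconds to samples, then
        # derive the FFT length and the number of frequency bins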
lowerCamelCase_ = win_length * sampling_rate // 1000
lowerCamelCase_ = hop_length * sampling_rate // 1000
lowerCamelCase_ = optimal_fft_length(self.sample_size )
lowerCamelCase_ = (self.n_fft // 2) + 1
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> np.ndarray:
'''simple docstring'''
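        # compute log-mel (MFSC) features for a single waveform:
        # window -> mel filter bank -> log-mel spectrogram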
if self.win_function == "hamming_window":
lowerCamelCase_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase_ = window_function(window_length=self.sample_size , name=self.win_function )
lowerCamelCase_ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
lowerCamelCase_ = spectrogram(
one_waveform * self.frame_signal_scale , window=SCREAMING_SNAKE_CASE_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=SCREAMING_SNAKE_CASE_ , preemphasis=self.preemphasis_coeff , mel_filters=SCREAMING_SNAKE_CASE_ , mel_floor=self.mel_floor , log_mel='log' , )
return msfc_features.T
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
if self.normalize_means:
lowerCamelCase_ = x[:input_length].mean(axis=0 )
lowerCamelCase_ = np.subtract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.normalize_vars:
lowerCamelCase_ = x[:input_length].std(axis=0 )
lowerCamelCase_ = np.divide(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if input_length < x.shape[0]:
lowerCamelCase_ = padding_value
# make sure array is in float32
lowerCamelCase_ = x.astype(np.floataa )
return x
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[np.ndarray]:
'''simple docstring'''
lowerCamelCase_ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCamelCase_ = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCamelCase_ = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
lowerCamelCase_ = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase_ = [raw_speech]
# extract fbank features
lowerCamelCase_ = [self._extract_mfsc_features(SCREAMING_SNAKE_CASE_ ) for one_waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase_ = BatchFeature({'input_features': features} )
lowerCamelCase_ = self.pad(
SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# make sure list is in array format
lowerCamelCase_ = padded_inputs.get('input_features' )
if isinstance(input_features[0] , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_features]
lowerCamelCase_ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowerCamelCase_ = (
np.array(SCREAMING_SNAKE_CASE_ , dtype=np.intaa )
if self._get_padding_strategies(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowerCamelCase_ = self.normalize(
padded_inputs['input_features'] , attention_mask=SCREAMING_SNAKE_CASE_ )
if return_tensors is not None:
lowerCamelCase_ = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ )
return padded_inputs
| 42 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _UpperCamelCase ( __UpperCamelCase = 8 ) -> str:
lowerCamelCase_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
    # Alternative password generator: combine the required characters with
    # randomly drawn letters, digits and punctuation (helpers defined below)
i -= len(__UpperCamelCase )
lowerCamelCase_ = i // 3
lowerCamelCase_ = i % 3
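    # split the remaining length evenly between letters, digits and punctuation;
    # any remainder goes to the letters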
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCamelCase_ = (
chars_incl
+ random(__UpperCamelCase ,quotient + remainder )
+ random(__UpperCamelCase ,__UpperCamelCase )
+ random(__UpperCamelCase ,__UpperCamelCase )
)
lowerCamelCase_ = list(__UpperCamelCase )
shuffle(__UpperCamelCase )
return "".join(__UpperCamelCase )
# random is a generalised function for letters, characters and numbers
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
    # minimal implementation (assumed from the generic helper above): sample characters from the given pool
    return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Tuple:
    # minimal implementation (assumed from the generic helper above): sample characters from the given pool
    return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
    # minimal implementation (assumed from the generic helper above): sample characters from the given pool
    return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 8 ) -> bool:
if len(__UpperCamelCase ) < min_length:
        # Your password must be at least 8 characters long
return False
lowerCamelCase_ = any(char in ascii_uppercase for char in password )
lowerCamelCase_ = any(char in ascii_lowercase for char in password )
lowerCamelCase_ = any(char in digits for char in password )
lowerCamelCase_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def _UpperCamelCase ( ) -> Optional[int]:
lowerCamelCase_ = int(input('Please indicate the max length of your password: ' ).strip() )
lowerCamelCase_ = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' ,password_generator(__UpperCamelCase ) )
print(
'Alternative Password generated:' ,alternative_password_generator(__UpperCamelCase ,__UpperCamelCase ) ,)
    print('[If you are thinking of using this password, you better save it.]' )
if __name__ == "__main__":
main()
| 42 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2000 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
lowerCamelCase_ = self.unet.config.sample_size
lowerCamelCase_ = (batch_size, 3, img_size, img_size)
lowerCamelCase_ = self.unet
lowerCamelCase_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ) * self.scheduler.init_noise_sigma
lowerCamelCase_ = sample.to(self.device )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
self.scheduler.set_sigmas(SCREAMING_SNAKE_CASE_ )
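        # predictor-corrector sampling: each timestep runs several Langevin
        # corrector steps followed by one reverse-SDE predictor step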
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCamelCase_ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = self.scheduler.step_correct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
# prediction step
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = self.scheduler.step_pred(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ,lowerCamelCase_ = output.prev_sample, output.prev_sample_mean
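        # after the loop, keep the (less noisy) posterior mean as the final sample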
lowerCamelCase_ = sample_mean.clamp(0 , 1 )
lowerCamelCase_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 42 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase_ = test_metrics
@require_cpu
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
| 42 | 1 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
A_ = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
A_ = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = CamembertTokenizer
SCREAMING_SNAKE_CASE_ = CamembertTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = CamembertTokenizer(SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1004 )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = CamembertTokenizer(SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
lowerCamelCase_ = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowerCamelCase_ = 'I was born in 92000, and this is falsé.'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'I was born in 92000, and this is falsé.'
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
lowerCamelCase_ = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=SCREAMING_SNAKE_CASE_ , )
| 42 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
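    # convert a diffuser temporal U-Net checkpoint of the given horizon into
    # the diffusers format and save its weights plus config under hub/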
if hor == 1_28:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 64, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
lowerCamelCase_ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCamelCase_ = model.state_dict()
lowerCamelCase_ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
lowerCamelCase_ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of converted model dict: {len(hf_value_function.state_dict().keys() )}''' )
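    # NOTE: keys are matched positionally via zip, so this assumes both state
    # dicts enumerate their parameters in the same order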
lowerCamelCase_ = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase_ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> Tuple:
lowerCamelCase_ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
lowerCamelCase_ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
lowerCamelCase_ = model
lowerCamelCase_ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase_ = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase_ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 42 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[32, 64, 128] , SCREAMING_SNAKE_CASE_=[1, 2, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 4] , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=["stage1", "stage2"] , SCREAMING_SNAKE_CASE_=[1, 2] , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> str:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = FocalNetModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
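        # each of the len(depths) - 1 patch-merging stages halves the spatial
        # resolution, shrinking the token count by a factor of 4 per stage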
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = FocalNetBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , embed_dim=37 , has_text_modality=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=SCREAMING_SNAKE_CASE_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (FocalNetBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = FocalNetConfig
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = FocalNetModelTester(self )
| 42 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ = 250_004
A_ = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
                # Checks it saves with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE_ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase_ = 1
return cls
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
lowerCamelCase_ = targets['input_ids']
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
| 42 | 1 |
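# Editor's sketch (not part of the dataset record above): the tests above exercise
# MBart's translation encoding, where source ids end with [eos, en_XX] and labels end
# with [eos, ro_RO]. A minimal usage sketch; running it downloads the checkpoint named
# in the tests.
#
# from transformers import MBartTokenizer
#
# tok = MBartTokenizer.from_pretrained(
#     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tok(
#     ["UN Chief Says There Is No Military Solution in Syria"],
#     text_target=["Seful ONU declara ca nu exista o solutie militara in Siria"],
#     padding=True,
#     return_tensors="pt",
# )
# # batch.input_ids[0][-2:] is [eos_id, en_XX_id]; batch.labels[0][-2:] is [eos_id, ro_RO_id]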
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase = 10_00 ) -> int:
return sum(e for e in range(3 ,__UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 |
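# Editor's sketch: a closed-form variant of the brute-force sum above. The multiples of
# k below n sum to k * m * (m + 1) / 2 with m = (n - 1) // k, and inclusion-exclusion
# removes the double-counted multiples of 15.
def sum_of_multiples(n: int = 1000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return tri(3) + tri(5) - tri(15)

assert sum_of_multiples(10) == 23  # 3 + 5 + 6 + 9, matching the brute-force version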
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'ylacombe/bark-small'
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = 'en_speaker_1'
lowerCamelCase_ = 'This is a test string'
lowerCamelCase_ = 'speaker_embeddings_path.json'
lowerCamelCase_ = 'speaker_embeddings'
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase_ = 35
lowerCamelCase_ = 2
lowerCamelCase_ = 8
lowerCamelCase_ = {
'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string )
lowerCamelCase_ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 42 | 1 |
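# Editor's sketch of the voice-preset round-trip the Bark processor test above relies
# on: a preset is a plain dict of numpy arrays, so np.savez / np.load reproduce it
# exactly. The shapes mirror the test above.
import os
import tempfile

import numpy as np

preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
path = os.path.join(tempfile.mkdtemp(), "voice_preset.npz")
np.savez(path, **preset)
loaded = np.load(path)
assert all(np.array_equal(preset[key], loaded[key]) for key in preset)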
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ) -> str:
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
lowerCamelCase_ = ''
while len(__UpperCamelCase ) % 3 != 0:
lowerCamelCase_ = '0' + bin_string
lowerCamelCase_ = [
bin_string[index : index + 3]
for index in range(len(__UpperCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
lowerCamelCase_ = 0
for index, val in enumerate(__UpperCamelCase ):
oct_val += int(2 ** (2 - index) * int(__UpperCamelCase ) )
oct_string += str(__UpperCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 42 |
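# Editor's sketch: a compact alternative to the 3-bit grouping above, using Python's
# base-conversion builtins (output matches up to leading zeros).
def bin_to_octal(bin_string: str) -> str:
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    return format(int(bin_string, 2), "o")

assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010101010011") == "52523"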
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = (
list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase ,__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word
return word
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) )
return bpe_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ''.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
lowerCamelCase_ = 0
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
| 42 | 1 |
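# Editor's sketch, independent of the tokenizer above: a toy version of the greedy BPE
# merge loop used in its `bpe` method. Repeatedly fuse the adjacent pair with the best
# (lowest) rank until no ranked pair remains. The ranks here are made-up assumptions.
def toy_bpe(word: str, ranks: dict) -> list:
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(a, b) for a, b in zip(symbols, symbols[1:])}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])  # apply the best-ranked merge
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

assert toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) == ["low", "e", "r"]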
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _UpperCamelCase ( __UpperCamelCase ) -> int:
lowerCamelCase_ = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase_ = [1_44, 1_92, 2_40]
lowerCamelCase_ = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase_ = [96, 1_20, 1_44]
lowerCamelCase_ = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase_ = [64, 80, 96]
lowerCamelCase_ = [16, 16, 24, 48, 64, 80, 3_20]
lowerCamelCase_ = 0.05
lowerCamelCase_ = 2.0
if mobilevit_name.startswith('deeplabv3_' ):
lowerCamelCase_ = 5_12
lowerCamelCase_ = 16
lowerCamelCase_ = 21
lowerCamelCase_ = 'pascal-voc-id2label.json'
else:
lowerCamelCase_ = 10_00
lowerCamelCase_ = 'imagenet-1k-id2label.json'
lowerCamelCase_ = 'huggingface/label-files'
lowerCamelCase_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) )
lowerCamelCase_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ) -> Optional[Any]:
for i in range(1 ,6 ):
if f'''layer_{i}.''' in name:
lowerCamelCase_ = name.replace(f'''layer_{i}.''' ,f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
lowerCamelCase_ = name.replace('conv_1.' ,'conv_stem.' )
if ".block." in name:
lowerCamelCase_ = name.replace('.block.' ,'.' )
if "exp_1x1" in name:
lowerCamelCase_ = name.replace('exp_1x1' ,'expand_1x1' )
if "red_1x1" in name:
lowerCamelCase_ = name.replace('red_1x1' ,'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ = name.replace('.local_rep.conv_3x3.' ,'.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ = name.replace('.local_rep.conv_1x1.' ,'.conv_1x1.' )
if ".norm." in name:
lowerCamelCase_ = name.replace('.norm.' ,'.normalization.' )
if ".conv." in name:
lowerCamelCase_ = name.replace('.conv.' ,'.convolution.' )
if ".conv_proj." in name:
lowerCamelCase_ = name.replace('.conv_proj.' ,'.conv_projection.' )
for i in range(0 ,2 ):
for j in range(0 ,4 ):
if f'''.{i}.{j}.''' in name:
lowerCamelCase_ = name.replace(f'''.{i}.{j}.''' ,f'''.{i}.layer.{j}.''' )
for i in range(2 ,6 ):
for j in range(0 ,4 ):
if f'''.{i}.{j}.''' in name:
lowerCamelCase_ = name.replace(f'''.{i}.{j}.''' ,f'''.{i}.''' )
if "expand_1x1" in name:
lowerCamelCase_ = name.replace('expand_1x1' ,'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
lowerCamelCase_ = name.replace('conv_3x3' ,'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
lowerCamelCase_ = name.replace('reduce_1x1' ,'downsampling_layer.reduce_1x1' )
for i in range(2 ,5 ):
if f'''.global_rep.{i}.weight''' in name:
lowerCamelCase_ = name.replace(f'''.global_rep.{i}.weight''' ,'.layernorm.weight' )
if f'''.global_rep.{i}.bias''' in name:
lowerCamelCase_ = name.replace(f'''.global_rep.{i}.bias''' ,'.layernorm.bias' )
if ".global_rep." in name:
lowerCamelCase_ = name.replace('.global_rep.' ,'.transformer.' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ = name.replace('.pre_norm_mha.0.' ,'.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ = name.replace('.pre_norm_mha.1.out_proj.' ,'.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ = name.replace('.pre_norm_ffn.0.' ,'.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ = name.replace('.pre_norm_ffn.1.' ,'.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ = name.replace('.pre_norm_ffn.4.' ,'.output.dense.' )
if ".transformer." in name:
lowerCamelCase_ = name.replace('.transformer.' ,'.transformer.layer.' )
if ".aspp_layer." in name:
lowerCamelCase_ = name.replace('.aspp_layer.' ,'.' )
if ".aspp_pool." in name:
lowerCamelCase_ = name.replace('.aspp_pool.' ,'.' )
if "seg_head." in name:
lowerCamelCase_ = name.replace('seg_head.' ,'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ = name.replace('segmentation_head.classifier.classifier.' ,'segmentation_head.classifier.' )
if "classifier.fc." in name:
lowerCamelCase_ = name.replace('classifier.fc.' ,'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ = 'mobilevit.' + name
return name
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ) -> List[str]:
if base_model:
lowerCamelCase_ = ''
else:
lowerCamelCase_ = 'mobilevit.'
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(__UpperCamelCase )
if key[:8] == "encoder.":
lowerCamelCase_ = key[8:]
if "qkv" in key:
lowerCamelCase_ = key.split('.' )
lowerCamelCase_ = int(key_split[0][6:] ) - 1
lowerCamelCase_ = int(key_split[3] )
lowerCamelCase_ = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
lowerCamelCase_ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[dim : dim * 2, :]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def _UpperCamelCase ( ) -> List[Any]:
lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ) -> Any:
lowerCamelCase_ = get_mobilevit_config(__UpperCamelCase )
# load original state_dict
lowerCamelCase_ = torch.load(__UpperCamelCase ,map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
lowerCamelCase_ = MobileViTForSemanticSegmentation(__UpperCamelCase ).eval()
else:
lowerCamelCase_ = MobileViTForImageClassification(__UpperCamelCase ).eval()
lowerCamelCase_ = convert_state_dict(__UpperCamelCase ,__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 )
lowerCamelCase_ = image_processor(images=prepare_img() ,return_tensors='pt' )
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] ,__UpperCamelCase ,atol=1e-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1e-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
lowerCamelCase_ = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
lowerCamelCase_ = model_mapping[mobilevit_name]
image_processor.push_to_hub(__UpperCamelCase ,organization='apple' )
model.push_to_hub(__UpperCamelCase ,organization='apple' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 42 |
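# Editor's sketch of the fused-QKV split performed in the conversion above: the original
# checkpoint stores one (3*dim, dim) in_proj weight, which is cut into query/key/value
# thirds. The dimension below is an illustrative assumption.
import torch

dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query, key, value = fused[:dim, :], fused[dim : 2 * dim, :], fused[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)
assert torch.equal(torch.cat([query, key, value], dim=0), fused)  # lossless split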
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RealmTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = do_lower_case
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = text
lowerCamelCase_ = kwargs.pop('text_pair' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = kwargs.pop('return_tensors' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE_ ):
if batch_text_pair is not None:
lowerCamelCase_ = batch_text_pair[idx]
else:
lowerCamelCase_ = None
lowerCamelCase_ = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = encoded_candidates.get('input_ids' )
lowerCamelCase_ = encoded_candidates.get('attention_mask' )
lowerCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(SCREAMING_SNAKE_CASE_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(SCREAMING_SNAKE_CASE_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE_ ) != 0}
return BatchEncoding(SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 42 | 1 |
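# Editor's sketch: the __call__ override above corresponds to REALM's candidate
# encoding, where each example carries a list of candidate passages and the output
# stacks one input_ids row per candidate. Running this downloads the checkpoint; the
# exact output shape is an assumption based on the padding-to-max-length logic above.
#
# from transformers import RealmTokenizerFast
#
# tok = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tok.batch_encode_candidates(
#     [["passage one", "passage two"]], max_length=10, return_tensors="pt"
# )
# # batch.input_ids stacks to roughly (batch, num_candidates, max_length)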
'''simple docstring'''
import numpy as np
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> np.ndarray:
return np.where(vector > 0 ,__UpperCamelCase ,(alpha * (np.exp(__UpperCamelCase ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
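# Editor's sketch: quick check of the element-wise ELU above. Positive inputs pass
# through unchanged, negative inputs are squashed into (-alpha, 0).
import numpy as np

def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))

out = elu(np.array([-2.0, 0.0, 3.0]), alpha=1.0)
assert out[2] == 3.0 and -1.0 < out[0] < 0.0 and out[1] == 0.0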
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 1 |
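# Editor's sketch, a worked example for the pandigital search above: 192 * (1, 2, 3)
# gives the classic concatenated product 192384576, which uses each digit 1-9 once.
def concatenated_product(base: int, n: int) -> str:
    return "".join(str(base * i) for i in range(1, n + 1))

candidate = concatenated_product(192, 3)
assert len(candidate) == 9 and set(candidate) == set("123456789")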
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
A_ = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
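# Editor's sketch of the lazy-import pattern used above: attribute access triggers the
# real import, so the package import stays cheap until a symbol is actually needed.
# This standalone version uses importlib instead of transformers' internal _LazyModule.
import importlib

class LazyAttr:
    def __init__(self, module_name: str, attr: str):
        self._module_name, self._attr, self._value = module_name, attr, None

    def resolve(self):
        if self._value is None:  # import happens only on first use
            self._value = getattr(importlib.import_module(self._module_name), self._attr)
        return self._value

sqrt = LazyAttr("math", "sqrt").resolve()  # 'math' is imported only here
assert sqrt(9.0) == 3.0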
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 | 1 |
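# Editor's sketch: the control letter of a Spanish DNI is the 8-digit number modulo 23
# indexed into the lookup table above, so 12345678 maps to 'Z'.
assert "TRWAGMYFPDXBNJZSQVHLCKE"[12345678 % 23] == "Z"  # '12345678-Z' validates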
'''simple docstring'''
A_ = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 42 |
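# Editor's sketch: a pin table like the one above can be checked at runtime with the
# `packaging` library (an assumption here; transformers ships its own dependency
# checker built on these pins).
from packaging.requirements import Requirement

req = Requirement("tokenizers>=0.11.1,!=0.11.3,<0.14")
assert req.name == "tokenizers"
assert "0.13.3" in req.specifier and "0.11.3" not in req.specifier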
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
lowerCamelCase_ = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。 こんばんは、㔺界。'
lowerCamelCase_ = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
lowerCamelCase_ = 'こんにちは、、、、世界。こんばんは、、、、世界。'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。'
lowerCamelCase_ = 'こんばんは、㔺界。😀'
lowerCamelCase_ = 'こんにちは、世界。こんばんは、世界。😀'
lowerCamelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。'
lowerCamelCase_ = 'こんばんは、㔺界。😀'
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = tokenizer.encode('あンいワ' )
lowerCamelCase_ = tokenizer.encode('' , prefix_text='あンいワ' )
lowerCamelCase_ = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# fmt: off
lowerCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
lowerCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
| 42 | 1 |
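# Editor's sketch of the prefix/input split exercised above: GPTSAN's tokenizer takes a
# `prefix_text` that is encoded ahead of the input and marked through token_type_ids
# (1 over the prefix span, 0 over the input span), with a SEG token at the boundary.
# Running this downloads the checkpoint.
#
# from transformers import AutoTokenizer
#
# tok = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
# enc = tok("input text", prefix_text="prompt text")
# # enc.token_type_ids marks the prefix with 1s and the input with 0s, as asserted above.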
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A_ = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated on the target device
# for 1-to-1 reproducibility with the CompVis implementation.
# However, this currently doesn't work on `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
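# NOTE: the second shape is the fixed 64x64 reference grid used by this
# seed-resize pipeline: noise is first drawn at the reference resolution so the
# same seed yields similar images across output sizes. This reading of the two
# shapes is inferred from the cropping logic further below.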
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
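# A minimal, self-contained sketch (separate from the pipeline above) of the
# classifier-free guidance step from equation (2) of the Imagen paper. The
# tensor shapes and the guidance scale below are illustrative assumptions.
if __name__ == "__main__":
    uncond = torch.zeros(1, 4, 8, 8)  # stand-in for the unconditional noise prediction
    text = torch.ones(1, 4, 8, 8)  # stand-in for the text-conditioned noise prediction
    guided = uncond + 7.5 * (text - uncond)  # w = 7.5 pushes toward the text branch
    print(guided.mean().item())  # 7.5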
| 42 | 1 |
'''simple docstring'''
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
A_ = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
A_ = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _UpperCamelCase ( __UpperCamelCase ) -> int:
lowerCamelCase_ = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) ,dtype=__UpperCamelCase )[0]
@deprecated(__UpperCamelCase ,'Please use tf.data to implement this functionality.' )
def _UpperCamelCase ( __UpperCamelCase ) -> Any:
print('Extracting' ,f.name )
with gzip.GzipFile(fileobj=__UpperCamelCase ) as bytestream:
lowerCamelCase_ = _readaa(__UpperCamelCase )
if magic != 20_51:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
lowerCamelCase_ = _readaa(__UpperCamelCase )
lowerCamelCase_ = _readaa(__UpperCamelCase )
lowerCamelCase_ = _readaa(__UpperCamelCase )
lowerCamelCase_ = bytestream.read(rows * cols * num_images )
lowerCamelCase_ = numpy.frombuffer(__UpperCamelCase ,dtype=numpy.uinta )
lowerCamelCase_ = data.reshape(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,1 )
return data
@deprecated(__UpperCamelCase ,'Please use tf.one_hot on tensors.' )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Tuple:
lowerCamelCase_ = labels_dense.shape[0]
lowerCamelCase_ = numpy.arange(__UpperCamelCase ) * num_classes
lowerCamelCase_ = numpy.zeros((num_labels, num_classes) )
lowerCamelCase_ = 1
return labels_one_hot
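# A quick, self-contained illustration of the dense -> one-hot conversion
# above; the sample labels are made up for the example.
if __name__ == "__main__":
    dense = numpy.array([0, 2, 1])
    one_hot = numpy.zeros((dense.shape[0], 3))
    one_hot[numpy.arange(dense.shape[0]), dense] = 1
    print(one_hot)  # [[1. 0. 0.] [0. 0. 1.] [0. 1. 0.]]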
@deprecated(__UpperCamelCase ,'Please use tf.data to implement this functionality.' )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=10 ) -> Union[str, Any]:
print('Extracting' ,f.name )
with gzip.GzipFile(fileobj=__UpperCamelCase ) as bytestream:
lowerCamelCase_ = _readaa(__UpperCamelCase )
if magic != 20_49:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
lowerCamelCase_ = _readaa(__UpperCamelCase )
lowerCamelCase_ = bytestream.read(__UpperCamelCase )
lowerCamelCase_ = numpy.frombuffer(__UpperCamelCase ,dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__UpperCamelCase ,__UpperCamelCase )
return labels
class UpperCAmelCase :
'''simple docstring'''
@deprecated(
SCREAMING_SNAKE_CASE_ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=dtypes.floataa , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = random_seed.get_seed(SCREAMING_SNAKE_CASE_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCamelCase_ = dtypes.as_dtype(SCREAMING_SNAKE_CASE_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
lowerCamelCase_ = 10000
lowerCamelCase_ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
lowerCamelCase_ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCamelCase_ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCamelCase_ = images.astype(numpy.floataa )
lowerCamelCase_ = numpy.multiply(SCREAMING_SNAKE_CASE_ , 1.0 / 255.0 )
lowerCamelCase_ = images
lowerCamelCase_ = labels
lowerCamelCase_ = 0
lowerCamelCase_ = 0
@property
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
return self._images
@property
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
return self._labels
@property
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return self._num_examples
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return self._epochs_completed
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ) -> int:
'''simple docstring'''
if fake_data:
lowerCamelCase_ = [1] * 784
lowerCamelCase_ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(SCREAMING_SNAKE_CASE_ )],
[fake_label for _ in range(SCREAMING_SNAKE_CASE_ )],
)
lowerCamelCase_ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCamelCase_ = numpy.arange(self._num_examples )
numpy.random.shuffle(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.images[perma]
lowerCamelCase_ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCamelCase_ = self._num_examples - start
lowerCamelCase_ = self._images[start : self._num_examples]
lowerCamelCase_ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCamelCase_ = numpy.arange(self._num_examples )
numpy.random.shuffle(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.images[perm]
lowerCamelCase_ = self.labels[perm]
# Start next epoch
lowerCamelCase_ = 0
lowerCamelCase_ = batch_size - rest_num_examples
lowerCamelCase_ = self._index_in_epoch
lowerCamelCase_ = self._images[start:end]
lowerCamelCase_ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCamelCase_ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
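# Usage sketch for next_batch (illustrative numbers): with 10 examples and
# batch_size = 6, the first call returns examples 0-5; the second call finishes
# the epoch by concatenating the 4 leftover examples with 2 examples drawn from
# the freshly (re)shuffled next epoch, so every batch has exactly 6 items.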
@deprecated(__UpperCamelCase ,'Please write your own downloading logic.' )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
if not gfile.Exists(__UpperCamelCase ):
gfile.MakeDirs(__UpperCamelCase )
lowerCamelCase_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
if not gfile.Exists(__UpperCamelCase ):
urllib.request.urlretrieve(__UpperCamelCase ,__UpperCamelCase ) # noqa: S310
with gfile.GFile(__UpperCamelCase ) as f:
lowerCamelCase_ = f.size()
print('Successfully downloaded' ,__UpperCamelCase ,__UpperCamelCase ,'bytes.' )
return filepath
@deprecated(
__UpperCamelCase ,'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=dtypes.floataa ,__UpperCamelCase=True ,__UpperCamelCase=50_00 ,__UpperCamelCase=None ,__UpperCamelCase=DEFAULT_SOURCE_URL ,) -> Union[str, Any]:
if fake_data:
def fake():
return _DataSet(
[] ,[] ,fake_data=__UpperCamelCase ,one_hot=__UpperCamelCase ,dtype=__UpperCamelCase ,seed=__UpperCamelCase )
lowerCamelCase_ = fake()
lowerCamelCase_ = fake()
lowerCamelCase_ = fake()
return _Datasets(train=__UpperCamelCase ,validation=__UpperCamelCase ,test=__UpperCamelCase )
if not source_url: # empty string check
lowerCamelCase_ = DEFAULT_SOURCE_URL
lowerCamelCase_ = 'train-images-idx3-ubyte.gz'
lowerCamelCase_ = 'train-labels-idx1-ubyte.gz'
lowerCamelCase_ = 't10k-images-idx3-ubyte.gz'
lowerCamelCase_ = 't10k-labels-idx1-ubyte.gz'
lowerCamelCase_ = _maybe_download(
__UpperCamelCase ,__UpperCamelCase ,source_url + train_images_file )
with gfile.Open(__UpperCamelCase ,'rb' ) as f:
lowerCamelCase_ = _extract_images(__UpperCamelCase )
lowerCamelCase_ = _maybe_download(
__UpperCamelCase ,__UpperCamelCase ,source_url + train_labels_file )
with gfile.Open(__UpperCamelCase ,'rb' ) as f:
lowerCamelCase_ = _extract_labels(__UpperCamelCase ,one_hot=__UpperCamelCase )
lowerCamelCase_ = _maybe_download(
__UpperCamelCase ,__UpperCamelCase ,source_url + test_images_file )
with gfile.Open(__UpperCamelCase ,'rb' ) as f:
lowerCamelCase_ = _extract_images(__UpperCamelCase )
lowerCamelCase_ = _maybe_download(
__UpperCamelCase ,__UpperCamelCase ,source_url + test_labels_file )
with gfile.Open(__UpperCamelCase ,'rb' ) as f:
lowerCamelCase_ = _extract_labels(__UpperCamelCase ,one_hot=__UpperCamelCase )
if not 0 <= validation_size <= len(__UpperCamelCase ):
lowerCamelCase_ = (
'Validation size should be between 0 and '
f'''{len(__UpperCamelCase )}. Received: {validation_size}.'''
)
raise ValueError(__UpperCamelCase )
lowerCamelCase_ = train_images[:validation_size]
lowerCamelCase_ = train_labels[:validation_size]
lowerCamelCase_ = train_images[validation_size:]
lowerCamelCase_ = train_labels[validation_size:]
lowerCamelCase_ = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
lowerCamelCase_ = _DataSet(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
lowerCamelCase_ = _DataSet(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
lowerCamelCase_ = _DataSet(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
return _Datasets(train=__UpperCamelCase ,validation=__UpperCamelCase ,test=__UpperCamelCase )
| 42 |
'''simple docstring'''
import pprint
import requests
A_ = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def random_quotes() -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
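# Each endpoint returns a JSON list of quote objects; per the public API docs
# the keys include "q" (quote text) and "a" (author). A hedged usage sketch:
def print_quotes() -> None:
    for quote in random_quotes():
        print(f"{quote['q']} - {quote['a']}")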
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
| 42 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
A_ = ""
A_ = ""
A_ = ""
A_ = 1 # (0 is vertical, 1 is horizontal)
def _UpperCamelCase ( ) -> None:
lowerCamelCase_ ,lowerCamelCase_ = get_dataset(__UpperCamelCase ,__UpperCamelCase )
print('Processing...' )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = update_image_and_anno(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
for index, image in enumerate(__UpperCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCamelCase_ = random_chars(32 )
lowerCamelCase_ = paths[index].split(os.sep )[-1].rsplit('.' ,1 )[0]
lowerCamelCase_ = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , image , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Success {index+1}/{len(__UpperCamelCase )} with {file_name}''' )
lowerCamelCase_ = []
for anno in new_annos[index]:
lowerCamelCase_ = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__UpperCamelCase )
with open(f'''{file_root}.txt''' ,'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> tuple[list, list]:
lowerCamelCase_ = []
lowerCamelCase_ = []
for label_file in glob.glob(os.path.join(__UpperCamelCase ,'*.txt' ) ):
lowerCamelCase_ = label_file.split(os.sep )[-1].rsplit('.' ,1 )[0]
with open(__UpperCamelCase ) as in_file:
lowerCamelCase_ = in_file.readlines()
lowerCamelCase_ = os.path.join(__UpperCamelCase ,f'''{label_name}.jpg''' )
lowerCamelCase_ = []
for obj_list in obj_lists:
lowerCamelCase_ = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__UpperCamelCase )
labels.append(__UpperCamelCase )
return img_paths, labels
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = 1 ) -> tuple[list, list, list]:
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = []
for idx in range(len(__UpperCamelCase ) ):
lowerCamelCase_ = []
lowerCamelCase_ = img_list[idx]
path_list.append(__UpperCamelCase )
lowerCamelCase_ = anno_list[idx]
lowerCamelCase_ = cva.imread(__UpperCamelCase )
if flip_type == 1:
lowerCamelCase_ = cva.flip(__UpperCamelCase ,__UpperCamelCase )
for bbox in img_annos:
lowerCamelCase_ = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
lowerCamelCase_ = cva.flip(__UpperCamelCase ,__UpperCamelCase )
for bbox in img_annos:
lowerCamelCase_ = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__UpperCamelCase )
new_imgs_list.append(__UpperCamelCase )
return new_imgs_list, new_annos_lists, path_list
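# Note on the flip arithmetic above: YOLO annotations store normalized centers
# in [0, 1], so a horizontal flip (flip_type == 1) maps x_center to
# 1 - x_center (e.g. 0.2 -> 0.8) and a vertical flip (flip_type == 0) maps
# y_center the same way; box widths and heights are unchanged.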
def _UpperCamelCase ( __UpperCamelCase = 32 ) -> str:
assert number_char > 1, "The number of characters should be greater than 1"
lowerCamelCase_ = ascii_lowercase + digits
return "".join(random.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 42 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
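# A minimal sketch (an assumption, not the transformers implementation) of the
# position-id scheme the two position-id tests above exercise: non-padding
# tokens receive padding_idx + 1, padding_idx + 2, ... in order, while padding
# positions keep padding_idx itself.
def make_position_ids_sketch(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1) * mask + padding_idx).long()
# make_position_ids_sketch(torch.as_tensor([[12, 31, 13, 1]]), 1) -> tensor([[2, 3, 4, 1]])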
| 42 | 1 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
A_ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
A_ = [0, 25, 50]
A_ = [25, 50, 75]
A_ = fuzz.membership.trimf(X, abca)
A_ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
A_ = np.ones(75)
A_ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
A_ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
A_ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
A_ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
A_ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
A_ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
A_ = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
A_ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
A_ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
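# Quick numeric check of the bounded operations (illustrative values): with
# membership degrees a = 0.7 and b = 0.6, the bounded sum is min(1, 1.3) = 1.0
# and the bounded difference is max(0, 0.1) = 0.1.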
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
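# A rough sketch of the lazy-import idea used above (an illustration, not the
# actual transformers _LazyModule): attribute access triggers the real import.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)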
| 42 | 1 |
'''simple docstring'''
from math import factorial
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = real
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [1] * rank
else:
lowerCamelCase_ = rank
def __repr__( self ) -> Any:
'''simple docstring'''
return (
f'''{self.real}+'''
f'''{"+".join(str(SCREAMING_SNAKE_CASE_ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , SCREAMING_SNAKE_CASE_ )
def __add__( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return Dual(self.real + other , self.duals )
lowerCamelCase_ = self.duals.copy()
lowerCamelCase_ = other.duals.copy()
if len(SCREAMING_SNAKE_CASE_ ) > len(SCREAMING_SNAKE_CASE_ ):
o_dual.extend([1] * (len(SCREAMING_SNAKE_CASE_ ) - len(SCREAMING_SNAKE_CASE_ )) )
elif len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
s_dual.extend([1] * (len(SCREAMING_SNAKE_CASE_ ) - len(SCREAMING_SNAKE_CASE_ )) )
lowerCamelCase_ = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = __add__
def __sub__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
return self + other * -1
def __mul__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = __mul__
def __truediv__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , SCREAMING_SNAKE_CASE_ )
raise ValueError
def __floordiv__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , SCREAMING_SNAKE_CASE_ )
raise ValueError
def __pow__( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
if n < 0 or isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
lowerCamelCase_ = self
for _ in range(n - 1 ):
x *= self
return x
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
if not callable(__UpperCamelCase ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(__UpperCamelCase ,(float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError('differentiate() requires an int as input for order' )
lowerCamelCase_ = Dual(__UpperCamelCase ,1 )
lowerCamelCase_ = func(__UpperCamelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4
print(differentiate(f, 9, 2))
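# Cross-check (worked by hand): f(y) = y**2 * y**4 = y**6, so the second
# derivative is 30 * y**4 and the call above should print 30 * 9**4 = 196830.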
| 42 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'xlm-prophetnet'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 30522 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 128 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = num_encoder_layers
lowerCamelCase_ = num_encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = num_decoder_layers
lowerCamelCase_ = num_decoder_attention_heads
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = init_std # Normal(0, this parameter)
lowerCamelCase_ = activation_function
# parameters for xlmprophetnet
lowerCamelCase_ = ngram
lowerCamelCase_ = num_buckets
lowerCamelCase_ = relative_max_distance
lowerCamelCase_ = disable_ngram_loss
lowerCamelCase_ = eps
# 3 Types of Dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = dropout
lowerCamelCase_ = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@property
def num_hidden_layers( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def num_hidden_layers( self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
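# Usage sketch (argument values are illustrative): with num_encoder_layers=2
# and num_decoder_layers=2, the read-only num_hidden_layers property above
# returns 4, and assigning to it raises NotImplementedError.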
| 42 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> float:
lowerCamelCase_ = x
lowerCamelCase_ = y
for step in range(__UpperCamelCase ): # noqa: B007
lowerCamelCase_ = a * a - b * b + x
lowerCamelCase_ = 2 * a * b + y
lowerCamelCase_ = a_new
# divergence happens for all complex numbers with an absolute value
# greater than 2, i.e. once a * a + b * b exceeds 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
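# Quick intuition check (illustrative): c = 0 never escapes, so get_distance
# returns 1.0 after max_step iterations, while c = 1 escapes after a few steps
# (1 -> 2 -> 5 -> ...), giving a value close to 0.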
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__UpperCamelCase ,1 ,1 ) )
def _UpperCamelCase ( __UpperCamelCase = 8_00 ,__UpperCamelCase = 6_00 ,__UpperCamelCase = -0.6 ,__UpperCamelCase = 0 ,__UpperCamelCase = 3.2 ,__UpperCamelCase = 50 ,__UpperCamelCase = True ,) -> Image.Image:
lowerCamelCase_ = Image.new('RGB' ,(image_width, image_height) )
lowerCamelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(__UpperCamelCase ):
for image_y in range(__UpperCamelCase ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase_ = figure_width / image_width * image_height
lowerCamelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase_ = get_distance(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase_ = get_color_coded_rgb(__UpperCamelCase )
else:
lowerCamelCase_ = get_black_and_white_rgb(__UpperCamelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
A_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 42 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 2 ,__UpperCamelCase = 1 ,__UpperCamelCase = 3 ,) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(f'''{args.num} = {divisor} * {quotient}''')
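# A quick usage sketch of pollard_rho (my own examples, not part of the
# original script). Which nontrivial factor comes back, and on which attempt,
# depends on the seed and step values, so the factors in the comments below
# are only one possible outcome.
#
#   pollard_rho(8051)    # 8051 = 83 * 97; returns 83 or 97
#   pollard_rho(10403)   # 10403 = 101 * 103; returns 101 or 103
#   pollard_rho(100)     # returns 2 immediately (even inputs short-circuit)
#   pollard_rho(13)      # returns None: 13 is prime, no nontrivial divisor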
| 42 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # s2 = sin(2 * theta) and c2 = cos(2 * theta), where theta is the normal's angle
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
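# The trigonometry in next_point is easier to follow written out. If the
# normal at the point of incidence has slope m = tan(theta) and the incoming
# beam has slope tan(alpha), reflecting the beam about the normal rotates its
# angle to 2*theta - alpha. The code's s2 and c2 come from the double-angle
# identities (in LaTeX):
#
#   \sin 2\theta = \frac{2m}{1 + m^2}, \qquad
#   \cos 2\theta = \frac{1 - m^2}{1 + m^2}
#
#   \tan(2\theta - \alpha)
#     = \frac{\sin 2\theta - \cos 2\theta \tan\alpha}
#            {\cos 2\theta + \sin 2\theta \tan\alpha}
#
# which is exactly (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient).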
| 42 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # membership test instead of the always-truthy `== "..." or "..."` expression
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
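# A hypothetical command line for this script; the file name and the local
# paths below are placeholders, not files shipped with the repository:
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted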
| 42 |
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
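# A short usage sketch (my own examples). Below the deterministic bound the
# answer is exact; above it, allow_probable=True must be passed, and True then
# only means "probably prime".
#
#   miller_rabin(97)                              # True (deterministic)
#   miller_rabin(561)                             # False (a Carmichael number)
#   miller_rabin(2**89 - 1, allow_probable=True)  # True, probabilistic only,
#                                                 # since 2**89 - 1 exceeds the bound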
| 42 | 1 |
'''simple docstring'''
def solution(num: int = 1_000_000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, num):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
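# A minimal sketch of the same chain-length computation for a single start
# value, without the memoisation used above; the name chain_length is an
# illustrative addition, not part of the original solution.
def chain_length(n: int) -> int:
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length


# chain_length(13) == 10: 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1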
| 42 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main() -> None:
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
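# A hypothetical pair of launch commands for this checkpointing test; the
# script name and directories are placeholders:
#
#   accelerate launch test_checkpointing.py \
#       --output_dir ./ckpts --num_epochs 2 --partial_train_epoch 1
#   accelerate launch test_checkpointing.py \
#       --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0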
| 42 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
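# A brief usage sketch (my own example, not part of the original module): the
# config is a plain container, so a smaller model is described by overriding a
# few fields, and attribute_map lets generic names resolve to the GPT ones.
#
#   config = OpenAIGPTConfig(n_layer=6, n_head=8, n_embd=512)
#   config.num_hidden_layers  # -> 6, via the attribute_map alias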
| 42 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
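# A quick sanity check (illustrative): even kernel sizes are bumped to the
# next odd value, so requesting a 10x10 kernel yields an 11x11 array.
#
#   kernel = gabor_filter_kernel(10, 8, 45, 10, 0, 0)
#   kernel.shape  # -> (11, 11)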
| 42 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
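# A hedged usage sketch (not part of the original module). The checkpoint
# name is the public VQA fine-tune on the Hugging Face Hub, and downloading
# it is assumed to be acceptable in your environment:
#
#   from PIL import Image
#   import requests
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   inputs = processor(image, "How many cats are there?", return_tensors="pt")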
| 42 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    # NOTE: the pipeline name is inferred from the backends this dummy guards
    # (transformers + torch + note_seq); treat it as an assumption.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
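# What the dummy buys you in practice (a sketch): importing the module always
# succeeds, but touching the class without the optional dependencies raises an
# informative ImportError from requires_backends, e.g.
#
#   pipe = SpectrogramDiffusionPipeline()  # raises if note_seq is missing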
| 42 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
'''simple docstring'''
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
        vqvae = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
        unet = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_ , unet=self.dummy_unet , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 , return_dict=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = self.dummy_vqvae_and_unet
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(raw_audio=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , start_step=5 , steps=10 )
lowerCamelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = self.dummy_unet_condition
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = torch.rand((1, 1, 10) )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = torch_device
lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
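# A hedged inference sketch mirroring the slow test above; it downloads the
# public checkpoint, so network access (and ideally a GPU) is assumed:
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#   output = pipe()
#   image = output.images[0]   # the generated mel spectrogram as a PIL image
#   audio = output.audios[0]   # the reconstructed waveform as a numpy array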
| 42 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase_ = 'lower'
lowerCamelCase_ = ['low', 'er</w>']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokens + ['<unk>']
lowerCamelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=15 ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Simple input
lowerCamelCase_ = 'This is a simple input'
lowerCamelCase_ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase_ = ('This is a simple input', 'This is a pair')
lowerCamelCase_ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
'''simple docstring'''
pass
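# Walking the toy vocabulary above by hand: "lower" is first split into the
# characters l o w e r</w>, then the learned merges fire in order
# ("l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>"), which leaves
# exactly the two tokens the test asserts:
#
#   "lower" -> ["low", "er</w>"] -> ids [14, 15]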
| 42 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))
def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
def main() -> None:
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
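# Usage sketch (illustrative values; actual generated passwords will differ):
#
#   password_generator(12)            # e.g. 'k#8Qv!2pL@4z'
#   is_strong_password("Abc123!?")    # True: all four character classes present
#   is_strong_password("password")    # False: no uppercase, digit, or special char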
| 42 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE_=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE_=[2, 3, 4] , SCREAMING_SNAKE_CASE_=None , ) -> Any:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_stages
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_labels
lowerCamelCase_ = initializer_range
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = ConvNextModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = ConvNextForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ConvNextBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = ConvNextBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ConvNextModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> int:
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ConvNextModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> List[Any]:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (ConvNextBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = ConvNextConfig
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ConvNextModelTester(self )
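# Illustrative sanity check (not part of the test suite; helper name is hypothetical):
# the shape assertions above follow from ConvNeXt's downsampling pattern -- a stride-4
# patchify stem, then a stride-2 downsample before each later stage. With image_size=32
# the per-stage spatial sizes are 8, 4, 2, 1, matching the 4x4 map checked for "stage2"
# and the 1x1 map checked for the last stage.
def _convnext_stage_sizes(image_size, num_stages=4):
    size = image_size // 4  # stride-4 patchify stem
    sizes = []
    for _ in range(num_stages):
        sizes.append(size)
        size //= 2  # stride-2 downsampling between stages
    return sizes
assert _convnext_stage_sizes(32) == [8, 4, 2, 1]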
| 42 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase_ = test_metrics
@require_cpu
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
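# Illustrative sketch of the environment patching used above: patch_environment is a
# context manager that temporarily sets environment variables (keys upper-cased), so
# omp_num_threads=1 becomes OMP_NUM_THREADS=1 for the torchrun subprocess. Behaviour
# assumed from the accelerate docs; verify against your installed version.
#
#   with patch_environment(omp_num_threads=1):
#       assert os.environ["OMP_NUM_THREADS"] == "1"  # visible only inside the block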
| 42 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = StableDiffusionXLImgaImgPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
SCREAMING_SNAKE_CASE_ = PipelineTesterMixin.required_optional_params - {'latents'}
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE_ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase_ = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
lowerCamelCase_ = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image / 2 + 0.5
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
# forward without prompt embeds
lowerCamelCase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 3 * ['this is a negative prompt']
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = 3 * [inputs['prompt']]
lowerCamelCase_ = sd_pipe(**SCREAMING_SNAKE_CASE_ )
        image_slice_1 = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 3 * ['this is a negative prompt']
lowerCamelCase_ = 3 * [inputs.pop('prompt' )]
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) = sd_pipe.encode_prompt(SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = sd_pipe(
**SCREAMING_SNAKE_CASE_ , prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , pooled_prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_pooled_prompt_embeds=SCREAMING_SNAKE_CASE_ , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
# make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="cpu" , SCREAMING_SNAKE_CASE_=torch.floataa , SCREAMING_SNAKE_CASE_=0 ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = np.random.RandomState(SCREAMING_SNAKE_CASE_ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.get_inputs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
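# Illustrative note on the 'strength' input used above: for img2img pipelines, strength
# selects how much of the noise schedule is applied to the init image, and roughly the
# last int(num_inference_steps * strength) denoising steps actually run. A hedged
# sketch of that rule (helper name is hypothetical):
def _effective_img2img_steps(num_inference_steps, strength):
    # mirrors the get_timesteps() clipping used by diffusers img2img pipelines
    return min(int(num_inference_steps * strength), num_inference_steps)
assert _effective_img2img_steps(2, 0.75) == 1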
| 42 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch')
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 65536,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNet1DModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')
    # remap checkpoint keys onto the diffusers parameter names (order-based mapping)
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin')
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json', 'w') as f:
        json.dump(config, f)
def value_function():
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 128, 256),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 65536,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model  # the .torch file already holds a plain state dict
    hf_value_function = UNet1DModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
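# Note on the conversion trick used above: zipping the key views of two state dicts
# pairs parameters purely by position, so it only works when both models enumerate
# their parameters in the same order. A minimal illustrative sketch (hypothetical
# helper, not part of the script):
from collections import OrderedDict
def _remap_by_position(src_state_dict, dst_keys):
    # pair the i-th source tensor with the i-th destination key
    return OrderedDict(zip(dst_keys, src_state_dict.values()))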
| 42 | 1 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
A_ = get_logger(__name__)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ = None ) -> str:
'''simple docstring'''
lowerCamelCase_ = (
os.path.join(SCREAMING_SNAKE_CASE_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase_ = Extractor
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase_ = os.path.abspath(SCREAMING_SNAKE_CASE_ )
return os.path.join(self.extract_dir , hash_url_to_filename(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE_ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ))
)
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if not extractor_format:
return input_path
lowerCamelCase_ = self._get_output_path(SCREAMING_SNAKE_CASE_ )
if self._do_extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return output_path
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
@abstractmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
...
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = []
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as f:
return f.read(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
            lowerCamelCase_ = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase_ = cls.read_magic_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ )
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
def resolved(SCREAMING_SNAKE_CASE_ ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )
def badpath(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ).startswith(SCREAMING_SNAKE_CASE_ )
def badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase_ = resolved(os.path.join(SCREAMING_SNAKE_CASE_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = resolved(SCREAMING_SNAKE_CASE_ )
for finfo in members:
if badpath(finfo.name , SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tarfile.open(SCREAMING_SNAKE_CASE_ )
tar_file.extractall(SCREAMING_SNAKE_CASE_ , members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
tar_file.close()
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x1F\x8B']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
with gzip.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as gzip_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(SCREAMING_SNAKE_CASE_ , magic_number=SCREAMING_SNAKE_CASE_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as fp:
lowerCamelCase_ = _EndRecData(SCREAMING_SNAKE_CASE_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase_ = fp.read(SCREAMING_SNAKE_CASE_ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE_ ) == sizeCentralDir:
lowerCamelCase_ = struct.unpack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , 'r' ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE_ )
zip_file.close()
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
with lzma.open(SCREAMING_SNAKE_CASE_ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rarfile.RarFile(SCREAMING_SNAKE_CASE_ )
rf.extractall(SCREAMING_SNAKE_CASE_ )
rf.close()
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x28\xb5\x2F\xFD']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
lowerCamelCase_ = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as ifh, open(SCREAMING_SNAKE_CASE_ , 'wb' ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x42\x5A\x68']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
        with bz2.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
        import py7zr
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
        with py7zr.SevenZipFile(SCREAMING_SNAKE_CASE_ , 'r' ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x04\x22\x4D\x18']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
        import lz4.frame
        with lz4.frame.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase( cls ) -> Union[str, Any]:
'''simple docstring'''
return max(
            len(extractor_magic_number )
for extractor in cls.extractors.values()
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE_ , magic_number_length=SCREAMING_SNAKE_CASE_ )
except OSError:
return b""
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> bool:
'''simple docstring'''
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = cls.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
lowerCamelCase_ = cls._get_magic_number_max_length()
lowerCamelCase_ = cls._read_magic_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ , magic_number=SCREAMING_SNAKE_CASE_ ):
return extractor_format
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE_ ) , exist_ok=SCREAMING_SNAKE_CASE_ )
# Prevent parallel extractions
lowerCamelCase_ = str(Path(SCREAMING_SNAKE_CASE_ ).with_suffix('.lock' ) )
with FileLock(SCREAMING_SNAKE_CASE_ ):
shutil.rmtree(SCREAMING_SNAKE_CASE_ , ignore_errors=SCREAMING_SNAKE_CASE_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = extractor if extractor != 'deprecated' else extractor_format
else:
lowerCamelCase_ = cls.extractors[extractor_format]
return extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=SCREAMING_SNAKE_CASE_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ ):
return extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
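# Usage sketch for the dispatcher above (names follow the upstream `datasets` library
# this module mirrors; treat them as assumptions, since the obfuscated names here differ):
#
#   fmt = Extractor.infer_extractor_format('data/archive.tar.gz')   # magic-number sniffing
#   if fmt:
#       Extractor.extract('data/archive.tar.gz', 'data/extracted', extractor_format=fmt)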
| 42 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ = 250_004
A_ = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE_ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase_ = 1
return cls
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_XX'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
lowerCamelCase_ = targets['input_ids']
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
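# Illustrative note: MBart appends its fairseq language codes after the SentencePiece
# vocabulary, so each code id is a fixed offset past the base vocab. The constants
# checked above are consistent with the codes' list positions (list truncated here):
FAIRSEQ_LANGUAGE_CODES_PREFIX = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX']  # illustrative prefix only
assert 250001 + FAIRSEQ_LANGUAGE_CODES_PREFIX.index('en_XX') == 250004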
| 42 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1))
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1))
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias))
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias))
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias))
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias))
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings))
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias))
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias))
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, 'rb') as f:
        model_weights = pickle.load(f)['weights']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
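# Illustrative note on the repeated .transpose(0, 1).contiguous() above: trax stores
# dense kernels as (in_features, out_features) while torch.nn.Linear keeps
# (out_features, in_features), so every ported dense weight must be transposed before
# it can be assigned to the PyTorch module.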
| 42 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'ylacombe/bark-small'
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = 'en_speaker_1'
lowerCamelCase_ = 'This is a test string'
lowerCamelCase_ = 'speaker_embeddings_path.json'
lowerCamelCase_ = 'speaker_embeddings'
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase_ = 35
lowerCamelCase_ = 2
lowerCamelCase_ = 8
lowerCamelCase_ = {
'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string )
lowerCamelCase_ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
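# Illustrative note: a Bark voice preset is just three integer arrays --
# 'semantic_prompt' (1-D), 'coarse_prompt' (nb_codebooks_coarse x seq_len) and
# 'fine_prompt' (nb_codebooks_total x seq_len) -- which is why the test above can
# round-trip one through a plain np.savez file.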
| 42 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(num: int) -> bool:
    digits = str(num)
    return len(digits) == 9 and set(digits) == set('123456789')
def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
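# Why those multipliers: for a 4-digit base b in 5000..9999, 2*b has 5 digits, so
# concatenating b and 2*b equals b * 10**5 + 2*b = 100002 * b. For a 3-digit base in
# 100..333 both 2*b and 3*b stay 3 digits, giving b * 10**6 + 2*b * 10**3 + 3*b
# = 1002003 * b. Scanning bases downward therefore returns the largest 9-pandigital
# concatenated product first.
assert 100002 * 9327 == int(str(9327) + str(2 * 9327))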
| 42 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = (
list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase ,__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word
return word
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) )
return bpe_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ''.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
lowerCamelCase_ = 0
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : SCREAMING_SNAKE_CASE_[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
| 42 | 1 |
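
The bpe() method above repeatedly merges the adjacent pair with the lowest merge rank until no ranked pair remains. A stripped-down sketch of that loop on an invented two-entry merge table:

def get_pairs(word):
    # every adjacent symbol pair in the current segmentation
    return {(a, b) for a, b in zip(word, word[1:])}

ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merge table, lower rank = merged earlier

word = ("l", "o", "w", "e", "r")
while True:
    bigram = min(get_pairs(word), key=lambda pair: ranks.get(pair, float("inf")))
    if bigram not in ranks:
        break
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)  # fuse the ranked pair
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)

assert word == ("low", "e", "r")
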
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> float:
lowerCamelCase_ = x
lowerCamelCase_ = y
for step in range(__UpperCamelCase ): # noqa: B007
lowerCamelCase_ = a * a - b * b + x
lowerCamelCase_ = 2 * a * b + y
lowerCamelCase_ = a_new
        # divergence happens for every complex number whose absolute value
        # exceeds 2; the check below uses the squared magnitude (> 4)
        if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__UpperCamelCase ,1 ,1 ) )
def _UpperCamelCase ( __UpperCamelCase = 8_00 ,__UpperCamelCase = 6_00 ,__UpperCamelCase = -0.6 ,__UpperCamelCase = 0 ,__UpperCamelCase = 3.2 ,__UpperCamelCase = 50 ,__UpperCamelCase = True ,) -> Image.Image:
lowerCamelCase_ = Image.new('RGB' ,(image_width, image_height) )
lowerCamelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(__UpperCamelCase ):
for image_y in range(__UpperCamelCase ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase_ = figure_width / image_width * image_height
lowerCamelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase_ = get_distance(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase_ = get_color_coded_rgb(__UpperCamelCase )
else:
lowerCamelCase_ = get_black_and_white_rgb(__UpperCamelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
A_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 42 |
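
The escape-time loop in get_distance can be exercised on a single point without rendering an image. A small sketch under the same convention (the return value is the fraction of iterations survived); c = -1 stays bounded, c = 1 escapes almost immediately:

def escape_fraction(x: float, y: float, max_step: int = 50) -> float:
    # iterate z -> z*z + c with c = x + iy, starting from z = c
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

assert escape_fraction(-1.0, 0.0) == 1.0  # bounded orbit: full fraction
assert escape_fraction(1.0, 0.0) < 0.1    # escapes after two iterations
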
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RealmTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = do_lower_case
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = text
lowerCamelCase_ = kwargs.pop('text_pair' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = kwargs.pop('return_tensors' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE_ ):
if batch_text_pair is not None:
lowerCamelCase_ = batch_text_pair[idx]
else:
lowerCamelCase_ = None
lowerCamelCase_ = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = encoded_candidates.get('input_ids' )
lowerCamelCase_ = encoded_candidates.get('attention_mask' )
lowerCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(SCREAMING_SNAKE_CASE_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(SCREAMING_SNAKE_CASE_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE_ ) != 0}
return BatchEncoding(SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 42 | 1 |
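
The __call__ override above encodes each example's candidate list separately and stacks the results. The per-row shape contract can be sketched with any fast BERT-style tokenizer; bert-base-uncased is only a stand-in checkpoint here, not one of the REALM ones:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")  # stand-in checkpoint

batch_texts = [["first candidate", "second candidate"]]  # one example, two candidates

rows = [
    tok(cands, padding="max_length", max_length=16, return_tensors="np")
    for cands in batch_texts
]
# Each row encodes to (num_candidates, max_length); stacking the rows yields the
# (batch_size, num_candidates, max_length) layout the method above returns.
print(rows[0]["input_ids"].shape)  # (2, 16)
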
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = BioGptModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Any:
'''simple docstring'''
lowerCamelCase_ = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = BioGptModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# create attention mask
lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.seq_length // 2
lowerCamelCase_ = 0
# first forward pass
lowerCamelCase_ ,lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCamelCase_ = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCamelCase_ = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , )
# get two different outputs
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['last_hidden_state']
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['last_hidden_state']
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
# first forward pass
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ,lowerCamelCase_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['last_hidden_state']
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[
'last_hidden_state'
]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = BioGptModel(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BioGptModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowerCamelCase_ = 'left'
        # Define PAD Token = EOS Token (BioGPT reuses its EOS token for padding)
lowerCamelCase_ = tokenizer.eos_token
lowerCamelCase_ = model.config.eos_token_id
# use different length sentences to test batching
lowerCamelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['input_ids'].to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model.generate(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['attention_mask'].to(SCREAMING_SNAKE_CASE_ ) , )
lowerCamelCase_ = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
lowerCamelCase_ = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings )
lowerCamelCase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = input_dict['input_ids']
lowerCamelCase_ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = 'multi_label_classification'
lowerCamelCase_ = input_dict['input_ids']
lowerCamelCase_ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase_ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
lowerCamelCase_ = torch.tensor([[2, 4805, 9, 656, 21]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 42384
lowerCamelCase_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowerCamelCase_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(SCREAMING_SNAKE_CASE_ )
torch.manual_seed(0 )
lowerCamelCase_ = tokenizer('COVID-19 is' , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model.generate(
**SCREAMING_SNAKE_CASE_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 42 |
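
The two cache tests above rest on a general decoder invariant: running the whole sequence at once must give the same logits as running a prefix, then the rest with past_key_values. A generic sketch of that check; sshleifer/tiny-gpt2 is used only because it is tiny and public, not because it relates to BioGPT:

import torch
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("sshleifer/tiny-gpt2").eval()
ids = torch.randint(0, model.config.vocab_size, (1, 8))

with torch.no_grad():
    full = model(ids).logits
    # run a 5-token prefix with the cache on, then the last 3 tokens from the cache
    prefix = model(ids[:, :5], use_cache=True)
    cont = model(ids[:, 5:], past_key_values=prefix.past_key_values).logits

# The final three positions agree whichever way they were computed.
assert torch.allclose(full[:, 5:], cont, atol=1e-4)
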
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
A_ = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
A_ = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
lowerCamelCase_ = set(__UpperCamelCase )
return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = merges_file
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 3
self.add_from_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[:-1]
lowerCamelCase_ = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = '@@ '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word[:-4]
lowerCamelCase_ = word
return word
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = re.findall(r'\S+\n?' , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) ) )
return split_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ ).replace('@@ ' , '' ).strip()
return out_string
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.merges_file , SCREAMING_SNAKE_CASE_ )
return out_vocab_file, out_merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
try:
with open(SCREAMING_SNAKE_CASE_ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {SCREAMING_SNAKE_CASE_}, please rebuild the dataset''' )
return
        lowerCamelCase_ = SCREAMING_SNAKE_CASE_.readlines()
for lineTmp in lines:
lowerCamelCase_ = lineTmp.strip()
lowerCamelCase_ = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
lowerCamelCase_ = line[:idx]
lowerCamelCase_ = len(self.encoder )
| 42 |
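
convert_tokens_to_string above undoes PhoBERT's BPE by joining the tokens and deleting the '@@ ' continuation markers. A standalone sketch of that rule:

def detokenize(tokens):
    # '@@' marks a subword whose word continues in the next token; joining on
    # spaces and deleting every '@@ ' stitches the pieces back together.
    return " ".join(tokens).replace("@@ ", "").strip()

assert detokenize(["un@@", "believ@@", "able", "tokens"]) == "unbelievable tokens"
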
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 | 1 |
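
The validator above computes the DNI check letter as the eight-digit number modulo 23, used as an index into the lookup table. A worked example with an invented ID number:

A_ = "TRWAGMYFPDXBNJZSQVHLCKE"

# 12345678 % 23 == 14, and index 14 of the table is 'Z',
# so "12345678Z" (or "12345678-Z") passes the validator above.
assert 12_34_56_78 % 23 == 14
assert A_[12_34_56_78 % 23] == "Z"
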
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'data2vec-text'
def __init__( self , SCREAMING_SNAKE_CASE_=30522 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = use_cache
lowerCamelCase_ = classifier_dropout
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def UpperCamelCase( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 42 |
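
The OnnxConfig subclass above differs between tasks only in its dynamic axes. A sketch of that branch as plain dict logic, with no actual ONNX export; the task names here are illustrative:

from collections import OrderedDict

def dynamic_inputs(task: str) -> OrderedDict:
    # multiple-choice inserts a 'choice' axis between batch and sequence
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])

assert dynamic_inputs("sequence-classification")["input_ids"] == {0: "batch", 1: "sequence"}
assert dynamic_inputs("multiple-choice")["attention_mask"][1] == "choice"
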
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
lowerCamelCase_ = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。 こんばんは、㔺界。'
lowerCamelCase_ = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
lowerCamelCase_ = 'こんにちは、、、、世界。こんばんは、、、、世界。'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。'
lowerCamelCase_ = 'こんばんは、㔺界。😀'
lowerCamelCase_ = 'こんにちは、世界。こんばんは、世界。😀'
lowerCamelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'こんにちは、世界。'
lowerCamelCase_ = 'こんばんは、㔺界。😀'
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        x_token_1 = tokenizer.encode('あンいワ' )
        x_token_2 = tokenizer.encode('' , prefix_text='あンいワ' )
        x_token_3 = tokenizer.encode('いワ' , prefix_text='あン' )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_2[-1] ) # SEG token
        self.assertEqual(x_token_3[1] , x_token_3[3] ) # SEG token
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        input_ids_expected = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , input_ids_expected )
        self.assertListEqual(x_token.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token.attention_mask , attention_mask_expected )
        self.assertListEqual(x_token_2.input_ids , input_ids_expected )
        self.assertListEqual(x_token_2.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token_2.attention_mask , attention_mask_expected )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
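

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the test suite above). It shows the
# prefix_text mechanism these tests exercise: tokens belonging to the prefix
# get token_type_id 1, everything after the SEG token gets 0. Assumes network
# access to the 'Tanrei/GPTSAN-japanese' checkpoint; the function name is
# illustrative only and is never called by the tests.
def _gptsan_prefix_text_demo():
    from transformers import GPTSanJapaneseTokenizer

    tokenizer = GPTSanJapaneseTokenizer.from_pretrained('Tanrei/GPTSAN-japanese' )
    encoded = tokenizer('こんばんは、世界。' , prefix_text='こんにちは、' )
    print(encoded.input_ids )       # prefix ids + SEG token + input ids
    print(encoded.token_type_ids )  # 1s over the prefix segment, then 0s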
| 42 | 1 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
A_ = logging.getLogger(__name__)
class SummarizationModule( BaseTransformer ):
    '''simple docstring'''
    mode = 'summarization'
    loss_names = ['loss']
    metric_names = ROUGE_KEYS
    default_val_metric = 'rouge2'
def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCamelCase_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , mode=self.mode , **SCREAMING_SNAKE_CASE_ )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
lowerCamelCase_ = Path(self.output_dir ) / 'metrics.json'
lowerCamelCase_ = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
lowerCamelCase_ = 0
lowerCamelCase_ = defaultdict(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.config.model_type
lowerCamelCase_ = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
lowerCamelCase_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCamelCase_ = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
lowerCamelCase_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCamelCase_ = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCamelCase_ = get_git_info()['repo_sha']
lowerCamelCase_ = hparams.num_workers
lowerCamelCase_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCamelCase_ = self.decoder_start_token_id
lowerCamelCase_ = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
lowerCamelCase_ = False
lowerCamelCase_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCamelCase_ = self.hparams.eval_max_gen_length
else:
lowerCamelCase_ = self.model.config.max_length
lowerCamelCase_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict[str, List[str]]:
'''simple docstring'''
lowerCamelCase_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(SCREAMING_SNAKE_CASE_ , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
lowerCamelCase_ = True
return readable_batch
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
return self.model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_decode(
SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return lmap(str.strip , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.pad_token_id
lowerCamelCase_ ,lowerCamelCase_ = batch['input_ids'], batch['attention_mask']
lowerCamelCase_ = batch['labels']
if isinstance(self.model , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = self.model._shift_right(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCamelCase_ = decoder_input_ids
self.save_readable_batch(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCamelCase_ = nn.CrossEntropyLoss(ignore_index=SCREAMING_SNAKE_CASE_ )
assert lm_logits.shape[-1] == self.vocab_size
lowerCamelCase_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCamelCase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
lowerCamelCase_ ,lowerCamelCase_ = label_smoothed_nll_loss(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.hparams.label_smoothing , ignore_index=SCREAMING_SNAKE_CASE_ )
return (loss,)
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self._step(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = dict(zip(self.loss_names , SCREAMING_SNAKE_CASE_ ) )
# tokens per batch
lowerCamelCase_ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
lowerCamelCase_ = batch['input_ids'].shape[0]
lowerCamelCase_ = batch['input_ids'].eq(self.pad ).sum()
lowerCamelCase_ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self._generative_step(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
lowerCamelCase_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCamelCase_ = losses['loss']
lowerCamelCase_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
lowerCamelCase_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCamelCase_ = torch.tensor(SCREAMING_SNAKE_CASE_ ).type_as(SCREAMING_SNAKE_CASE_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
lowerCamelCase_ = self.step_count
self.metrics[prefix].append(SCREAMING_SNAKE_CASE_ ) # callback writes this to self.metrics_save_path
lowerCamelCase_ = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> dict:
'''simple docstring'''
        ta = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCamelCase_ = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCamelCase_ = (time.time() - ta) / batch['input_ids'].shape[0]
lowerCamelCase_ = self.ids_to_clean_text(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.ids_to_clean_text(batch['labels'] )
lowerCamelCase_ = self._step(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = dict(zip(self.loss_names , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = self.calc_generative_metrics(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = np.mean(lmap(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
base_metrics.update(gen_time=SCREAMING_SNAKE_CASE_ , gen_len=SCREAMING_SNAKE_CASE_ , preds=SCREAMING_SNAKE_CASE_ , target=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return base_metrics
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
return self._generative_step(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
return self.validation_epoch_end(SCREAMING_SNAKE_CASE_ , prefix='test' )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> SeqaSeqDataset:
'''simple docstring'''
lowerCamelCase_ = self.n_obs[type_path]
lowerCamelCase_ = self.target_lens[type_path]
lowerCamelCase_ = self.dataset_class(
self.tokenizer , type_path=SCREAMING_SNAKE_CASE_ , n_obs=SCREAMING_SNAKE_CASE_ , max_target_length=SCREAMING_SNAKE_CASE_ , **self.dataset_kwargs , )
return dataset
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> DataLoader:
'''simple docstring'''
lowerCamelCase_ = self.get_dataset(SCREAMING_SNAKE_CASE_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCamelCase_ = dataset.make_sortish_sampler(SCREAMING_SNAKE_CASE_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , collate_fn=dataset.collate_fn , shuffle=SCREAMING_SNAKE_CASE_ , num_workers=self.num_workers , sampler=SCREAMING_SNAKE_CASE_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCamelCase_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
SCREAMING_SNAKE_CASE_ , batch_sampler=SCREAMING_SNAKE_CASE_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , collate_fn=dataset.collate_fn , shuffle=SCREAMING_SNAKE_CASE_ , num_workers=self.num_workers , sampler=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self ) -> DataLoader:
'''simple docstring'''
lowerCamelCase_ = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=SCREAMING_SNAKE_CASE_ )
return dataloader
def UpperCamelCase( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def UpperCamelCase( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
add_generic_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
parser.add_argument(
'--max_source_length' , default=1024 , type=SCREAMING_SNAKE_CASE_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
        parser.add_argument(
            '--max_target_length' , default=56 , type=SCREAMING_SNAKE_CASE_ , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=142 , type=SCREAMING_SNAKE_CASE_ , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=142 , type=SCREAMING_SNAKE_CASE_ , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--max_tokens_per_batch' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--logger_name' , type=SCREAMING_SNAKE_CASE_ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=SCREAMING_SNAKE_CASE_ , default=500 , required=SCREAMING_SNAKE_CASE_ , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' , type=SCREAMING_SNAKE_CASE_ , default='summarization' , required=SCREAMING_SNAKE_CASE_ , help='Task to run: summarization or translation.' )
parser.add_argument('--label_smoothing' , type=SCREAMING_SNAKE_CASE_ , default=0.0 , required=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--src_lang' , type=SCREAMING_SNAKE_CASE_ , default='' , required=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--tgt_lang' , type=SCREAMING_SNAKE_CASE_ , default='' , required=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--eval_beams' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ )
parser.add_argument(
'--val_metric' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=SCREAMING_SNAKE_CASE_ , default=1 , required=SCREAMING_SNAKE_CASE_ , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will affect it.'
) , )
return parser
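

# ---------------------------------------------------------------------------
# For reference: a minimal sketch of what a label-smoothed NLL loss such as
# the `label_smoothed_nll_loss` helper imported above typically computes.
# This illustrates the technique only; the real utils implementation may
# differ in details such as reduction or return values.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # gold-token NLL
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform-smoothing term
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()
    return loss, nll_loss.sum()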
class TranslationModule( SummarizationModule ):
    '''simple docstring'''
    mode = 'translation'
    loss_names = ['loss']
    metric_names = ['bleu']
    default_val_metric = 'bleu'
def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = hparams.src_lang
lowerCamelCase_ = hparams.tgt_lang
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict:
'''simple docstring'''
return calculate_bleu(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def main(args ,model=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=__UpperCamelCase )
check_output_dir(__UpperCamelCase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
lowerCamelCase_ = SummarizationModule(__UpperCamelCase )
else:
lowerCamelCase_ = TranslationModule(__UpperCamelCase )
lowerCamelCase_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
lowerCamelCase_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
lowerCamelCase_ = os.environ.get('WANDB_PROJECT' ,__UpperCamelCase )
lowerCamelCase_ = WandbLogger(name=model.output_dir.name ,project=__UpperCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
lowerCamelCase_ = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
lowerCamelCase_ = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
lowerCamelCase_ = False
lowerCamelCase_ = args.val_metric == 'loss'
lowerCamelCase_ = generic_train(
__UpperCamelCase ,__UpperCamelCase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__UpperCamelCase ) ,early_stopping_callback=__UpperCamelCase ,logger=__UpperCamelCase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
lowerCamelCase_ = ''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=True ) )
if checkpoints:
lowerCamelCase_ = checkpoints[-1]
lowerCamelCase_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
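

# Example invocation (illustrative only; paths and values are placeholders,
# and flag names come from this file, add_generic_args, or pl.Trainer):
#
#   python finetune.py --data_dir ./cnn_dm --output_dir ./out --gpus 1 \
#       --max_source_length 1024 --max_target_length 56 --do_predict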
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = pl.Trainer.add_argparse_args(parser)
A_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
A_ = parser.parse_args()
main(args)
| 42 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
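        # 0.18215 is the Stable Diffusion VAE latent scaling factor; dividing
        # by it rescales the latents into the range the VAE decoder expects.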
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
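

# ---------------------------------------------------------------------------
# For reference: the classifier-free guidance update used in the denoising
# loop above, isolated as a small pure function (a sketch of the standard
# formula, not an API exposed by this pipeline):
def _classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
    # guidance_scale == 1.0 reduces to the plain conditional prediction
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)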
| 42 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
A_ = random.Random()
def floats_list(shape ,scale=1.0 ,rng=None ,name=None ) -> list:
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
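

# e.g. floats_list((2, 3)) returns a 2x3 nested list of floats in
# [0.0, scale), drawn from `global_rng` unless an explicit rng is passed.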
@require_torch
class SpeechTaFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=2000 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=16000 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_="hann_window" , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=7600 , SCREAMING_SNAKE_CASE_=1E-10 , SCREAMING_SNAKE_CASE_=True , ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = min_seq_length
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase_ = feature_size
lowerCamelCase_ = padding_value
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = do_normalize
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = hop_length
lowerCamelCase_ = win_length
lowerCamelCase_ = win_function
lowerCamelCase_ = fmin
lowerCamelCase_ = fmax
lowerCamelCase_ = mel_floor
lowerCamelCase_ = return_attention_mask
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> List[str]:
'''simple docstring'''
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
lowerCamelCase_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> Tuple:
'''simple docstring'''
if equal_length:
lowerCamelCase_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
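

# For reference: in the padded/batched tests below, attention_mask.sum(-1)
# recovers each utterance's true length: lengths [3, 5] padded to 5 give
# masks [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]].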
@require_torch
class SpeechTaFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = SpeechTaFeatureExtractor
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = SpeechTaFeatureExtractionTester(self )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_ , axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCamelCase_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test batched
lowerCamelCase_ = feat_extract(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
lowerCamelCase_ = feat_extract(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = feat_extract(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = range(800 , 1400 , 200 )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = feat_extract(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=1000 , padding='max_length' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=1000 , padding='longest' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=2000 , padding='longest' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = np.random.rand(100 ).astype(np.floataa )
lowerCamelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase_ = feature_extractor(audio_target=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test batched
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ = np.asarray(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) for x, y in zip(SCREAMING_SNAKE_CASE_ , processed_features[input_name] ) ) )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCamelCase_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCamelCase_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='np' )[input_name]
lowerCamelCase_ = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.feat_extract_dict
lowerCamelCase_ = True
lowerCamelCase_ = self.feature_extraction_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = [len(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.feat_extract_dict
lowerCamelCase_ = True
lowerCamelCase_ = self.feature_extraction_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = [len(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
from datasets import load_dataset
lowerCamelCase_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCamelCase_ = ds.sort('id' ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
        # fmt: off
        lowerCamelCase_ = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = SpeechTaFeatureExtractor()
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , SCREAMING_SNAKE_CASE_ , atol=1E-6 ) )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
        # fmt: off
        lowerCamelCase_ = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = SpeechTaFeatureExtractor()
lowerCamelCase_ = feature_extractor(audio_target=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
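

# ---------------------------------------------------------------------------
# For reference: the per-utterance normalization that the zero-mean /
# unit-variance checks above verify, sketched with numpy. This illustrates
# the property being tested, not the extractor's internal code path:
def _zero_mean_unit_var_sketch(values, eps=1e-7):
    values = np.asarray(values, dtype=np.float32)
    return (values - values.mean()) / np.sqrt(values.var() + eps)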
| 42 |
'''simple docstring'''
import pprint
import requests
A_ = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '/today' ).json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '/random' ).json()
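

# Each endpoint returns a JSON list of quote objects; at the time of writing
# the payload looks roughly like [{"q": "<quote>", "a": "<author>", ...}]
# (shape taken from the public ZenQuotes docs; treat it as illustrative).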
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
| 42 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_encoder_blocks' ) )
class SegformerModelTester:
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[16, 32, 64, 128] , SCREAMING_SNAKE_CASE_=[1, 4, 8, 16] , SCREAMING_SNAKE_CASE_=[1, 2, 4, 8] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ) -> str:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_encoder_blocks
lowerCamelCase_ = sr_ratios
lowerCamelCase_ = depths
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = downsampling_rates
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = SegformerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = lowerCamelCase_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = SegformerForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = 1
lowerCamelCase_ = SegformerForSemanticSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
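

# ---------------------------------------------------------------------------
# For reference: the attention-shape arithmetic the tests below assert.
# SegFormer stage i attends over (image_size / stride_i)**2 query tokens and
# reduces keys/values by sr_ratio_i along each spatial axis (a sketch, not
# model code):
def _segformer_attention_shape_sketch(image_size, stride, sr_ratio, num_heads):
    seq_len = (image_size // stride) ** 2
    reduced_seq_len = (image_size // (stride * sr_ratio)) ** 2
    return (num_heads, seq_len, reduced_seq_len)
# e.g. image_size=64, stride=4, sr_ratio=8, num_heads=1 gives (1, 256, 4),
# matching the first-block attention shape checked in the tests below.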
@require_torch
class SegformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = SegformerModelTester(self )
lowerCamelCase_ = SegformerConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('SegFormer does not use inputs_embeds' )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
lowerCamelCase_ = sum(self.model_tester.depths )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first attentions (first block, first layer)
lowerCamelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCamelCase_ = (self.model_tester.image_size // 32) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first attentions (first block, first layer)
lowerCamelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
lowerCamelCase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = SegformerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> Optional[Any]:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-1 ) )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = outputs.logits.detach().cpu()
lowerCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ , target_sizes=[(500, 300)] )
lowerCamelCase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
| 42 |
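# A minimal standalone sketch of the inference path the integration tests
# above exercise: run SegFormer on an image and post-process the logits back
# to a per-pixel segmentation map. The checkpoint and image path are taken
# from the tests themselves; SegformerImageProcessor defaults stand in for
# the custom processor arguments used above.
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

image_processor = SegformerImageProcessor()
model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
inputs = image_processor(images=image, return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
# logits come out at 1/4 of the processed resolution (128x128 for 512x512 inputs);
# post-processing upsamples them to any requested target size
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)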
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( TestCasePlus ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 | 1 |
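# The position-id tests above pin down the positional scheme ESM shares with
# RoBERTa-style models: real tokens get consecutive positions starting at
# padding_idx + 1, while padding tokens keep position padding_idx. A minimal
# reimplementation of that rule (the function name here is illustrative):
import torch

def position_ids_from_input_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    # cumulative count of non-padding tokens, zeroed out again at padding slots
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

print(position_ids_from_input_ids(torch.as_tensor([[12, 31, 13, 1]]), padding_idx=1))
# tensor([[2, 3, 4, 1]]), matching the expectation checked in the test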
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'do_convert_rgb': True,
}
lowerCamelCase_ = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
lowerCamelCase_ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
lowerCamelCase_ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 42 |
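# Hedged usage sketch for the processor under test: one call tokenizes the
# Chinese text and preprocesses the image together. The checkpoint name is an
# assumption for illustration; any ChineseCLIP checkpoint that ships a
# processor should behave the same way.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16')  # assumed checkpoint
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text='Alexandra,T-shirt的价格是15便士。', images=image, return_tensors='pt')
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']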
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 42 | 1 |
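# The module above registers everything lazily: importing the package stays
# cheap, and the heavy torch/tf/flax submodules are only imported when one of
# their symbols is first accessed. A simplified sketch of that deferred-import
# idea (the real _LazyModule handles more bookkeeping):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        # the expensive import happens here, on first attribute access
        submodule = importlib.import_module('.' + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)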
'''simple docstring'''
import os
import time

import numpy as np
import onnxruntime as ort

# TensorRT execution-provider settings: enable INT8, skip the native
# calibration table, and cache the built engine between runs.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2_000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1_000 / max_iters))
| 42 |
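# A hedged variant of the benchmark loop above that runs on plain CPU and
# builds the feed dict from the graph's own input names; "model.onnx" and the
# int64 input dtypes are assumptions carried over from the script above.
# time.perf_counter is used instead of time.time for more stable timings.
import time
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
feed = {inp.name: np.ones((1, 128), dtype=np.int64) for inp in sess.get_inputs()}
sess.run(None, feed)  # warm-up: the first run pays one-off graph setup costs
start = time.perf_counter()
iters = 100
for _ in range(iters):
    sess.run(None, feed)
print("Average Inference Time = {:.3f} ms".format((time.perf_counter() - start) * 1_000 / iters))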
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }
    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = num_encoder_layers
lowerCamelCase_ = num_encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = num_decoder_layers
lowerCamelCase_ = num_decoder_attention_heads
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = init_std # Normal(0, this parameter)
lowerCamelCase_ = activation_function
# parameters for xlmprophetnet
lowerCamelCase_ = ngram
lowerCamelCase_ = num_buckets
lowerCamelCase_ = relative_max_distance
lowerCamelCase_ = disable_ngram_loss
lowerCamelCase_ = eps
# 3 Types of Dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = dropout
lowerCamelCase_ = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
| 42 | 1 |
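# The num_hidden_layers property above is derived rather than stored: it
# reports the sum of encoder and decoder layers, and the setter always raises.
# A short check, assuming the standard transformers class name behind the
# config above:
from transformers import XLMProphetNetConfig

config = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
print(config.num_hidden_layers)  # 24
try:
    config.num_hidden_layers = 6
except NotImplementedError as err:
    print(err)  # points you to num_encoder_layers / num_decoder_layers instead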
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 42 |
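# Minimal sketch of the classifier-free guidance arithmetic performed inside
# the denoising loop above: the batched prediction is split into its
# unconditional and text-conditioned halves and recombined with the guidance
# scale. Shapes here are illustrative.
import torch

def guided_noise(noise_pred, guidance_scale):
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

print(guided_noise(torch.ones(2, 4, 8, 8), guidance_scale=7.5).shape)  # torch.Size([1, 4, 8, 8])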
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 42 | 1 |
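# Quick sanity check of the escape-time normalisation used above (assumes the
# Mandelbrot functions from the module above are in scope): points inside the
# set saturate at 1.0, while immediately diverging points score 0.0.
print(get_distance(0, 0, 50))  # 1.0 -> rendered black
print(get_distance(2, 2, 50))  # 0.0 -> diverges on the very first step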
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase ( TaskTemplate ):
'''simple docstring'''
    task: str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    text_column: str = 'text'
    label_column: str = 'labels'
    def align_with_features(self, features):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
def UpperCamelCase( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 42 |
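# Hedged usage sketch for the task template above, assuming the standard
# datasets class name TextClassification (the datasets.tasks module was
# deprecated in later releases, so the import is an assumption): aligning with
# concrete features swaps the generic ClassLabel placeholder for the
# dataset's real label set.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({'text': Value('string'), 'labels': ClassLabel(names=['neg', 'pos'])})
template = TextClassification(text_column='text', label_column='labels')
aligned = template.align_with_features(features)
print(aligned.label_schema['labels'].names)  # ['neg', 'pos']
print(template.column_mapping)  # {'text': 'text', 'labels': 'labels'}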
'''simple docstring'''
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal at the point of incidence on the ellipse
    normal_gradient = point_y / 4 / point_x
    # double-angle identities give sin/cos of twice the normal's angle,
    # which reflect the incoming gradient
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 42 | 1 |
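# Numeric sanity check for next_point above (assumes the functions from the
# module above are in scope): every reflection point must stay on the ellipse
# 4x^2 + y^2 = 100 that bounds the white cell.
x, y, m = 1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4)
for _ in range(5):
    x, y, m = next_point(x, y, m)
    assert abs(4 * x * x + y * y - 100) < 1e-6
print('first five reflection points lie on the ellipse')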
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # depth-first search; each vertex is appended after all of its
    # descendants, yielding a reverse post-order
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # collect every vertex reachable from `vert` in the reversed graph
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
| 42 |
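# Usage check against the sample graphs defined at the top of the module
# above: test_graph_1 contains the cycle {0, 1, 2} plus two single-vertex
# components, and test_graph_2 splits into {0, 1, 2} and {3, 4, 5}. Vertex
# order inside each component depends on DFS traversal order.
print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]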
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin() -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 42 | 1 |
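# Quick demonstration of the decomposition the witness loop above relies on:
# n - 1 is factored as d * 2**s with d odd before any base is tested.
n = 221
d, s = n - 1, 0
while d % 2 == 0:
    d //= 2
    s += 1
print(d, s, d * 2**s == n - 1)  # 55 2 True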
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = 'this is a test'
lowerCamelCase_ = 'this is a test'
return input_text, output_text
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 30000 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'I was born in 92000, and this is falsé.'
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [48, 25, 21, 1289] )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
    def test_sequence_builders(self) -> None:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode('sequence builders')
        text_2 = tokenizer.encode('multi-sequence build')
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
lowerCamelCase_ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_, model_name='albert-base-v2', revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e', )
| 42 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length, or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
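# Usage (see `training_function` below) — the accelerator is passed in so that
# `collate_fn` can choose TPU-friendly padding based on the distributed type:
#     train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)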
def evaluation_loop(accelerator, eval_dataloader, model, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer: fall back to DeepSpeed's DummyOptim when the DeepSpeed
    # config already specifies an optimizer.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue', 'mrpc')
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Extract the numeric epoch suffix from the checkpoint folder name (e.g. "epoch_3")
        epoch_string = args.resume_from_checkpoint.split('epoch_')[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, eval_dataloader, model, metric)
        accelerator.print('resumed checkpoint performance:', accuracy)
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0])
        accelerator.print('resumed optimizer\'s lr:', optimizer.param_groups[0]['lr'])
        with open(os.path.join(args.output_dir, f'state_{starting_epoch - 1}.json'), 'r') as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        # Save a checkpoint and the training state at the end of each epoch
        output_dir = os.path.join(args.output_dir, f'epoch_{epoch}')
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, eval_dataloader, model, metric)
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(f'epoch {epoch}:', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f'state_{epoch}.json'), 'w') as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of a training script with per-epoch checkpointing.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
    parser.add_argument(
        '--output_dir', type=str, default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.')
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None,
        help='If the training should continue from a checkpoint folder.')
    parser.add_argument(
        '--partial_train_epoch', type=int, default=None,
        help='If passed, the training will stop after this number of epochs.')
    parser.add_argument(
        '--num_epochs', type=int, default=2, help='Number of train epochs.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 42 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeps the
# full vocab and merges file, and thus also results in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
# The result will then be used as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
A_ = "facebook/wmt19-en-de"
A_ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
A_ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
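# The updated config now describes a 1-layer encoder/decoder with d_model=4:
# tiny random weights, but the full-size vocabulary of the master model.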
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
A_ = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 42 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill in each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
return gabor
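# Example (illustrative parameter values): an oriented kernel at 45 degrees;
# ksize=10 is bumped to 11 internally so the kernel has a well-defined center.
#     kernel = gabor_filter_kernel(10, 8, 45, 10, 0, 0)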
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray-scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 42 | 1 |
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum: int, power: int, current_number: int, current_sum: int, solutions_count: int
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.')
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
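# Quick sanity check: solve(13, 2) == 1, since 2**2 + 3**2 = 13 is the only way
# to write 13 as a sum of squares of unique natural numbers.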
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
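# Placeholder that can always be imported; instantiating it or calling
# `from_config`/`from_pretrained` raises an error asking the user to install
# `transformers`, `torch` and `note_seq`.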
class UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
    _backends = ['transformers', 'torch', 'note_seq']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['transformers', 'torch', 'note_seq'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])
| 42 | 1 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
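# Processor combining an auto image processor and an auto tokenizer behind a
# single `__call__`; `token2json` below converts generated `<s_key>...</s_key>`
# markup back into a JSON-like structure.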
class UpperCAmelCase ( ProcessorMixin ):
'''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backwards compatibility, route calls made inside `as_target_processor`
        # straight to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop('images', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs
    def batch_decode(self, *args, **kwargs):
        '''Forward everything to the tokenizer's `batch_decode`.'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''Forward everything to the tokenizer's `decode`.'''
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        '''Convert a generated token sequence with <s_key>...</s_key> markup into an ordered JSON-like dict.'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r'<s_(.*?)>', tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf'</s_{key}>', tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, '')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f'{start_token_escaped}(.*?){end_token_escaped}', tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r'<sep/>'):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning, )
        return self.image_processor
| 42 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
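# End-to-end smoke tests for the audio-diffusion pipeline on tiny models:
# unconditional spectrogram generation, round-tripping raw audio through the
# VQ-VAE, and generation conditioned on an encoding vector.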
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('AttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'AttnUpBlock2D'), )
        return model
@property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), cross_attention_dim=10, )
        return model
@property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D'), up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D'), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('AttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'AttnUpBlock2D'), )
        return vqvae, unet
@slow
    def test_audio_diffusion(self) -> None:
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion(self) -> None:
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 42 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCAmelCase ( BaseOutput ):
    '''Output of the VAE decoder: the decoded sample tensor.'''

    sample: torch.FloatTensor
class UpperCAmelCase ( nn.Module ):
    '''Convolutional VAE encoder: conv_in -> down blocks -> UNetMidBlock2D -> GroupNorm/SiLU -> conv_out.'''
def __init__( self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE_=(64,) , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_="silu" , SCREAMING_SNAKE_CASE_=True , ) -> str:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = layers_per_block
lowerCamelCase_ = torch.nn.Conv2d(
SCREAMING_SNAKE_CASE_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase_ = None
lowerCamelCase_ = nn.ModuleList([] )
# down
lowerCamelCase_ = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = output_channel
lowerCamelCase_ = block_out_channels[i]
lowerCamelCase_ = i == len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCamelCase_ = get_down_block(
SCREAMING_SNAKE_CASE_ , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE_ , resnet_groups=SCREAMING_SNAKE_CASE_ , attention_head_dim=SCREAMING_SNAKE_CASE_ , temb_channels=SCREAMING_SNAKE_CASE_ , )
self.down_blocks.append(SCREAMING_SNAKE_CASE_ )
# mid
lowerCamelCase_ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE_ , temb_channels=SCREAMING_SNAKE_CASE_ , )
# out
lowerCamelCase_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE_ , eps=1E-6 )
lowerCamelCase_ = nn.SiLU()
lowerCamelCase_ = 2 * out_channels if double_z else out_channels
lowerCamelCase_ = nn.Conv2d(block_out_channels[-1] , SCREAMING_SNAKE_CASE_ , 3 , padding=1 )
lowerCamelCase_ = False
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = x
lowerCamelCase_ = self.conv_in(SCREAMING_SNAKE_CASE_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE_ ):
def custom_forward(*SCREAMING_SNAKE_CASE_ ):
return module(*SCREAMING_SNAKE_CASE_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
lowerCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , use_reentrant=SCREAMING_SNAKE_CASE_ )
# middle
lowerCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE_ , use_reentrant=SCREAMING_SNAKE_CASE_ )
else:
for down_block in self.down_blocks:
lowerCamelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# middle
lowerCamelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE_ )
else:
# down
for down_block in self.down_blocks:
lowerCamelCase_ = down_block(SCREAMING_SNAKE_CASE_ )
# middle
lowerCamelCase_ = self.mid_block(SCREAMING_SNAKE_CASE_ )
# post-process
lowerCamelCase_ = self.conv_norm_out(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.conv_act(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.conv_out(SCREAMING_SNAKE_CASE_ )
return sample
class UpperCAmelCase ( nn.Module ):
    '''Convolutional VAE decoder: conv_in -> UNetMidBlock2D -> up blocks -> norm/act -> conv_out.'''
def __init__( self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE_=(64,) , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_="silu" , SCREAMING_SNAKE_CASE_="group" , ) -> Any:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = layers_per_block
lowerCamelCase_ = nn.Conv2d(
SCREAMING_SNAKE_CASE_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase_ = None
lowerCamelCase_ = nn.ModuleList([] )
lowerCamelCase_ = in_channels if norm_type == 'spatial' else None
# mid
lowerCamelCase_ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE_ , temb_channels=SCREAMING_SNAKE_CASE_ , )
# up
lowerCamelCase_ = list(reversed(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = output_channel
lowerCamelCase_ = reversed_block_out_channels[i]
lowerCamelCase_ = i == len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCamelCase_ = get_up_block(
SCREAMING_SNAKE_CASE_ , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , prev_output_channel=SCREAMING_SNAKE_CASE_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE_ , resnet_groups=SCREAMING_SNAKE_CASE_ , attention_head_dim=SCREAMING_SNAKE_CASE_ , temb_channels=SCREAMING_SNAKE_CASE_ , resnet_time_scale_shift=SCREAMING_SNAKE_CASE_ , )
self.up_blocks.append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output_channel
# out
if norm_type == "spatial":
lowerCamelCase_ = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE_ , eps=1E-6 )
lowerCamelCase_ = nn.SiLU()
lowerCamelCase_ = nn.Conv2d(block_out_channels[0] , SCREAMING_SNAKE_CASE_ , 3 , padding=1 )
lowerCamelCase_ = False
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str:
'''simple docstring'''
lowerCamelCase_ = z
lowerCamelCase_ = self.conv_in(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE_ ):
def custom_forward(*SCREAMING_SNAKE_CASE_ ):
return module(*SCREAMING_SNAKE_CASE_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
lowerCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , use_reentrant=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ )
# up
for up_block in self.up_blocks:
lowerCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , use_reentrant=SCREAMING_SNAKE_CASE_ )
else:
# middle
lowerCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ )
# up
for up_block in self.up_blocks:
lowerCamelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# middle
lowerCamelCase_ = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ )
# up
for up_block in self.up_blocks:
lowerCamelCase_ = up_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# post-process
if latent_embeds is None:
lowerCamelCase_ = self.conv_norm_out(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase_ = self.conv_norm_out(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.conv_act(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.conv_out(SCREAMING_SNAKE_CASE_ )
return sample
class UpperCAmelCase ( nn.Module ):
    '''Discretization bottleneck of the VQ-VAE: maps continuous latents to their nearest codebook embeddings.'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="random" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = n_e
lowerCamelCase_ = vq_embed_dim
lowerCamelCase_ = beta
lowerCamelCase_ = legacy
lowerCamelCase_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowerCamelCase_ = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
lowerCamelCase_ = self.used.shape[0]
lowerCamelCase_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowerCamelCase_ = self.re_embed
lowerCamelCase_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
lowerCamelCase_ = n_e
lowerCamelCase_ = sane_index_shape
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = inds.shape
assert len(SCREAMING_SNAKE_CASE_ ) > 1
lowerCamelCase_ = inds.reshape(ishape[0] , -1 )
lowerCamelCase_ = self.used.to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = (inds[:, :, None] == used[None, None, ...]).long()
lowerCamelCase_ = match.argmax(-1 )
lowerCamelCase_ = match.sum(2 ) < 1
if self.unknown_index == "random":
lowerCamelCase_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowerCamelCase_ = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = inds.shape
assert len(SCREAMING_SNAKE_CASE_ ) > 1
lowerCamelCase_ = inds.reshape(ishape[0] , -1 )
lowerCamelCase_ = self.used.to(SCREAMING_SNAKE_CASE_ )
if self.re_embed > self.used.shape[0]: # extra token
lowerCamelCase_ = 0 # simply set to zero
lowerCamelCase_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE_ )
return back.reshape(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowerCamelCase_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowerCamelCase_ = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE_ , self.embedding.weight ) , dim=1 )
lowerCamelCase_ = self.embedding(SCREAMING_SNAKE_CASE_ ).view(z.shape )
lowerCamelCase_ = None
lowerCamelCase_ = None
# compute loss for embedding
if not self.legacy:
lowerCamelCase_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowerCamelCase_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowerCamelCase_ = z + (z_q - z).detach()
# reshape back to match original input shape
lowerCamelCase_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowerCamelCase_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowerCamelCase_ = self.remap_to_used(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowerCamelCase_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
if self.remap is not None:
lowerCamelCase_ = indices.reshape(shape[0] , -1 ) # add batch axis
lowerCamelCase_ = self.unmap_to_all(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowerCamelCase_ = self.embedding(SCREAMING_SNAKE_CASE_ )
if shape is not None:
lowerCamelCase_ = z_q.view(SCREAMING_SNAKE_CASE_ )
# reshape back to match original input shape
lowerCamelCase_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCAmelCase ( object ):
    '''Diagonal Gaussian over latents, parameterized by concatenated mean and log-variance; supports sampling, KL and NLL.'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = parameters
lowerCamelCase_ ,lowerCamelCase_ = torch.chunk(SCREAMING_SNAKE_CASE_ , 2 , dim=1 )
lowerCamelCase_ = torch.clamp(self.logvar , -30.0 , 20.0 )
lowerCamelCase_ = deterministic
lowerCamelCase_ = torch.exp(0.5 * self.logvar )
lowerCamelCase_ = torch.exp(self.logvar )
if self.deterministic:
lowerCamelCase_ = lowerCamelCase_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = None ) -> torch.FloatTensor:
'''simple docstring'''
lowerCamelCase_ = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE_ , device=self.parameters.device , dtype=self.parameters.dtype )
lowerCamelCase_ = self.mean + self.std * sample
return x
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[1, 2, 3] ) -> Union[str, Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
lowerCamelCase_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return self.mean
| 42 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Alternative generator: the required characters plus roughly equal thirds of
    # random letters, digits and special characters, shuffled together.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))
def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...
# Strong passwords should contain UPPERCASE, lowercase, numbers, and special characters
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
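# Quick sanity check: is_strong_password("Hwea7$2!") -> True (it has an upper-case
# letter, a lower-case letter, a digit, a special character, and 8 characters).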
def main() -> None:
    max_length = int(input('Please indicate the max length of your password: ').strip())
    chars_incl = input(
        'Please indicate the characters that must be in your password: ').strip()
    print('Password generated:', password_generator(max_length))
    print(
        'Alternative Password generated:',
        alternative_password_generator(chars_incl, max_length), )
    print('[If you are thinking of using this password, you had better save it.]')
if __name__ == "__main__":
main()
| 42 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
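# Lazy import structure: each optional backend's classes are only registered when
# that backend is installed, and the real imports happen on first attribute access.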
A_ = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 42 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self) -> None:
        debug_launcher(self.test_metrics.main, num_processes=1)
@require_cpu
    def test_metric_cpu_multi(self) -> None:
        debug_launcher(self.test_metrics.main)
@require_single_gpu
    def test_metric_gpu(self) -> None:
        self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi(self) -> None:
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 42 | 1 |
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError('longest_common_substring() takes two strings for inputs')

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
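# Quick sanity check: longest_common_substring("abcdxyz", "xyzabcd") -> "abcd"
# ("abcd" is the longest run of characters shared by both strings).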
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
if hor == 1_28:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 64, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch')
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNet1DModel(**config)
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
        json.dump(config, f)
def value_function():
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model
    hf_value_function = UNet1DModel(**config)
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 42 | 1 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _UpperCamelCase ( __UpperCamelCase ) -> Any:
lowerCamelCase_ = tf.convert_to_tensor(__UpperCamelCase )
lowerCamelCase_ = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) ,x.dtype ) ))
return x * cdf
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = tf.convert_to_tensor(__UpperCamelCase )
lowerCamelCase_ = tf.cast(math.pi ,x.dtype )
lowerCamelCase_ = tf.cast(0.04_4715 ,x.dtype )
lowerCamelCase_ = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase ,3 )) ))
return x * cdf
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[Any]:
lowerCamelCase_ = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ) -> str:
lowerCamelCase_ = tf.convert_to_tensor(__UpperCamelCase )
lowerCamelCase_ = tf.cast(0.04_4715 ,x.dtype )
lowerCamelCase_ = tf.cast(0.79_7884_5608 ,x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def _UpperCamelCase ( __UpperCamelCase ) -> str:
lowerCamelCase_ = tf.convert_to_tensor(__UpperCamelCase )
lowerCamelCase_ = tf.cast(1.702 ,x.dtype )
return x * tf.math.sigmoid(coeff * x )
def _UpperCamelCase ( __UpperCamelCase ) -> int:
return tf.clip_by_value(_gelu(__UpperCamelCase ) ,-10 ,10 )
def _UpperCamelCase ( __UpperCamelCase ,axis=-1 ) -> List[Any]:
    a ,b = tf.split(__UpperCamelCase ,2 ,axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
        return tf.keras.activations.gelu(__UpperCamelCase ,approximate=True )
A_ = tf.keras.activations.gelu
A_ = approximate_gelu_wrap
else:
A_ = _gelu
A_ = _gelu_new
A_ = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def _UpperCamelCase ( __UpperCamelCase ) -> Tuple:
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
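# ---------------------------------------------------------------------------
# Quick numeric check (a sketch, relying on the TensorFlow and math imports
# above): the tanh-based GELU approximation should stay within roughly 1e-3
# of the exact erf-based form on moderate inputs. Helper names here are
# illustrative, not part of the module.
def _gelu_exact_check(x):
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    return x * 0.5 * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))

def _gelu_tanh_check(x):
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    inner = tf.sqrt(2.0 / math.pi) * (x + 0.044715 * tf.pow(x, 3))
    return 0.5 * x * (1.0 + tf.tanh(inner))

_xs = tf.linspace(-3.0, 3.0, 61)
print(float(tf.reduce_max(tf.abs(_gelu_exact_check(_xs) - _gelu_tanh_check(_xs)))))  # ~1e-3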
| 42 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ = 250_004
A_ = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    SCREAMING_SNAKE_CASE_ = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase_ = 1
return cls
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0] , str )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
lowerCamelCase_ = targets['input_ids']
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
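# ---------------------------------------------------------------------------
# Pure-Python sketch of what shift_tokens_right does for MBart in the tests
# above, for a single unpadded row only (the real transformers implementation
# is batched and pad-aware): the target language code sits at the end of the
# labels, and decoding must start from it, so the row is rotated right by one.
def _shift_right_unpadded(label_ids):
    return label_ids[-1:] + label_ids[:-1]

print(_shift_right_unpadded([8274, 127873, 2, 250020]))  # [250020, 8274, 127873, 2]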
| 42 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'time_series_transformer'
SCREAMING_SNAKE_CASE_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "student_t" , SCREAMING_SNAKE_CASE_ = "nll" , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE_ = "mean" , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 64 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = prediction_length
lowerCamelCase_ = context_length or prediction_length
lowerCamelCase_ = distribution_output
lowerCamelCase_ = loss
lowerCamelCase_ = input_size
lowerCamelCase_ = num_time_features
lowerCamelCase_ = lags_sequence
lowerCamelCase_ = scaling
lowerCamelCase_ = num_dynamic_real_features
lowerCamelCase_ = num_static_real_features
lowerCamelCase_ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
lowerCamelCase_ = cardinality
else:
lowerCamelCase_ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
lowerCamelCase_ = embedding_dimension
else:
lowerCamelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase_ = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase_ = input_size * len(SCREAMING_SNAKE_CASE_ ) + self._number_of_features
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = use_cache
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
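# ---------------------------------------------------------------------------
# Worked example (assumed values) of the `_number_of_features` sum above: one
# static categorical feature of cardinality 10 gets the default embedding
# dimension min(50, (10 + 1) // 2) = 5, so with input_size=1,
# num_time_features=7 and no dynamic or static real features the model input
# gains 5 + 0 + 7 + 0 + 1 * 2 = 14 extra channels.
_embedding_dims = [min(50, (10 + 1) // 2)]  # -> [5]
print(sum(_embedding_dims) + 0 + 7 + 0 + 1 * 2)  # 14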
| 42 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'ylacombe/bark-small'
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = 'en_speaker_1'
lowerCamelCase_ = 'This is a test string'
lowerCamelCase_ = 'speaker_embeddings_path.json'
lowerCamelCase_ = 'speaker_embeddings'
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase_ = 35
lowerCamelCase_ = 2
lowerCamelCase_ = 8
lowerCamelCase_ = {
'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string )
lowerCamelCase_ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
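# ---------------------------------------------------------------------------
# Minimal sketch of the voice-preset round trip the tests above depend on: a
# Bark voice preset is a plain dict of numpy arrays, persisted with np.savez
# and read back key by key. Shapes mirror the fixture in the test; the file
# name is illustrative.
_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
np.savez("voice_preset_demo.npz", **_preset)
_loaded = np.load("voice_preset_demo.npz")
assert all(np.array_equal(_preset[k], _loaded[k]) for k in _preset)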
| 42 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
A_ = False
@skip_mps
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = StableDiffusionAttendAndExcitePipeline
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def UpperCamelCase( cls ) -> Optional[int]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCamelCase( cls ) -> str:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
lowerCamelCase_ = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> int:
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = lowerCamelCase_ = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = 'cpu'
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase_ = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
lowerCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE_ , 1E-3 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase( cls ) -> List[str]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCamelCase( cls ) -> List[str]:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = torch.manual_seed(51 )
lowerCamelCase_ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to('cuda' )
lowerCamelCase_ = 'a painting of an elephant with glasses'
lowerCamelCase_ = [5, 7]
lowerCamelCase_ = pipe(
prompt=SCREAMING_SNAKE_CASE_ , token_indices=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
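# ---------------------------------------------------------------------------
# Illustrative version of the regression check used in the fast test above:
# take a small fixed slice of the generated image and compare it against
# stored reference values with an absolute tolerance. The arrays here are
# placeholders, not real pipeline output.
_image = np.zeros((1, 64, 64, 3), dtype=np.float32)
_expected_slice = np.zeros(9, dtype=np.float32)
assert np.abs(_image[0, -3:, -3:, -1].flatten() - _expected_slice).max() <= 1e-3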
| 42 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = (
list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    lowerCamelCase_ = [chr(n ) for n in cs]
    return dict(zip(bs ,cs ) )
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word
return word
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) )
return bpe_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ''.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
lowerCamelCase_ = 0
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
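# ---------------------------------------------------------------------------
# Toy BPE walkthrough of the merge loop in the `bpe()` method above, with
# made-up merge ranks (not the Longformer merge table): repeatedly merge the
# adjacent pair with the lowest rank until no ranked pair remains.
_ranks = {("l", "o"): 0, ("lo", "w"): 1}

def _merge_once(word, pair):
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == pair:
            out.append(word[i] + word[i + 1])
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)

_word = ("l", "o", "w", "e", "r")
while True:
    _pairs = {(_word[i], _word[i + 1]) for i in range(len(_word) - 1)}
    _ranked = [p for p in _pairs if p in _ranks]
    if not _ranked:
        break
    _word = _merge_once(_word, min(_ranked, key=_ranks.get))
print(_word)  # ('low', 'e', 'r')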
| 42 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> Optional[Any]:
if attention_mask is None:
lowerCamelCase_ = tf.cast(tf.math.not_equal(__UpperCamelCase ,config.pad_token_id ) ,tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = OPTConfig
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 'gelu'
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=20 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=16 , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = embed_dim
lowerCamelCase_ = word_embed_proj_dim
lowerCamelCase_ = False
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **self.config_updates , )
lowerCamelCase_ = prepare_opt_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return config, inputs_dict
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = TFOPTModel(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs_dict['input_ids']
lowerCamelCase_ = input_ids[:1, :]
lowerCamelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCamelCase_ = 1
# first forward pass
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ,lowerCamelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCamelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-3 )
@require_tf
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ = (TFOPTForCausalLM,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = 10
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = TFOPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if hasattr(SCREAMING_SNAKE_CASE_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(SCREAMING_SNAKE_CASE_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCamelCase_ = model_class(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = _get_word_embedding_weight(SCREAMING_SNAKE_CASE_ , model.get_input_embeddings() )
lowerCamelCase_ = _get_word_embedding_weight(SCREAMING_SNAKE_CASE_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = _get_word_embedding_weight(SCREAMING_SNAKE_CASE_ , model.get_input_embeddings() )
lowerCamelCase_ = _get_word_embedding_weight(SCREAMING_SNAKE_CASE_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCamelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , SCREAMING_SNAKE_CASE_ )
# check that weights remain the same after resizing
lowerCamelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase_ = False
self.assertTrue(SCREAMING_SNAKE_CASE_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase_ = False
self.assertTrue(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( __UpperCamelCase ) -> int:
return tf.constant(__UpperCamelCase ,dtype=tf.intaa )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 99
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCamelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCamelCase_ = input_ids.shape[0]
lowerCamelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCamelCase_ = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowerCamelCase_ = tf.not_equal(SCREAMING_SNAKE_CASE_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCamelCase_ = model(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).last_hidden_state
lowerCamelCase_ = (1, 11, 512)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=4E-3 ) )
lowerCamelCase_ = tf.function(SCREAMING_SNAKE_CASE_ , jit_compile=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = xla_generate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=4E-2 ) )
@require_tf
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
lowerCamelCase_ = 'facebook/opt-350m'
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCamelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCamelCase_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCamelCase_ = tf.constant(
[
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
lowerCamelCase_ = tf.function(SCREAMING_SNAKE_CASE_ , jit_compile=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@require_tf
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = 'facebook/opt-125m'
lowerCamelCase_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCamelCase_ = []
lowerCamelCase_ = GPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = TFOPTForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
for prompt in self.prompts:
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' ).input_ids
lowerCamelCase_ = model.generate(SCREAMING_SNAKE_CASE_ , max_length=10 )
lowerCamelCase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
predicted_outputs += generated_string
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = 'facebook/opt-350m'
lowerCamelCase_ = GPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = TFOPTForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'left'
# use different length sentences to test batching
lowerCamelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['input_ids']
lowerCamelCase_ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['attention_mask'] )
lowerCamelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCamelCase_ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCamelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCamelCase_ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings )
lowerCamelCase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = 'facebook/opt-350m'
lowerCamelCase_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCamelCase_ = []
lowerCamelCase_ = GPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = TFOPTForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
for prompt in self.prompts:
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' ).input_ids
lowerCamelCase_ = model.generate(SCREAMING_SNAKE_CASE_ , max_length=10 )
lowerCamelCase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
predicted_outputs += generated_string
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
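# ---------------------------------------------------------------------------
# Why the batched-generation test above sets padding_side = "left", shown
# with toy token lists: decoder-only models append new tokens at the right
# edge, so pads must sit on the left; right padding would leave pad tokens
# between the prompt and the generated continuation.
_prompts = [["Hello,", "my", "dog", "is", "a", "little"], ["Today,", "I"]]
_width = max(len(p) for p in _prompts)
_left_padded = [["<pad>"] * (_width - len(p)) + p for p in _prompts]
print(_left_padded[1])  # ['<pad>', '<pad>', '<pad>', '<pad>', 'Today,', 'I']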
| 42 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" REALM tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the arguments passed here: a
        # checkpoint may have been saved with different casing/accent handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs) -> BatchEncoding:
        """Encode a 2D batch of candidate texts; candidates are always padded to
        `max_length` so they can be stacked into a single tensor per question."""
        # Always pad to max_length; candidates of one question must share a shape.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        # Drop empty fields so the BatchEncoding only carries populated keys.
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by adding special tokens: [CLS] A [SEP] (B [SEP])."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Create token type IDs: 0 for the first sequence and its special tokens,
        1 for the second sequence (if any) and its trailing [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the wordpiece vocabulary files to `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
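
# Minimal usage sketch for the class above, not part of the original file. It
# shows how batch_encode_candidates stacks a 2D batch of candidate texts into
# (num_questions, num_candidates, max_length) arrays. The checkpoint name is
# taken from the pretrained map above; loading it assumes network/cache access.
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    candidates = [
        ["Hello world!", "Nice to meet you!"],   # candidates for question 1
        ["The cute cat.", "The adorable dog."],  # candidates for question 2
    ]
    batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="np")
    print(batch.input_ids.shape)  # expected: (2, 2, 10)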